diff --git a/.circleci/config.yml b/.circleci/config.yml
index d8ccf68f..daab703b 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -17,6 +17,4 @@ workflows:
mapping: |
.circleci/.* run-all-workflows true
gpt4all-backend/.* run-all-workflows true
- gpt4all-bindings/python/.* run-python-workflow true
- gpt4all-bindings/typescript/.* run-ts-workflow true
gpt4all-chat/.* run-chat-workflow true
diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml
index 05c78eca..8adbee8e 100644
--- a/.circleci/continue_config.yml
+++ b/.circleci/continue_config.yml
@@ -8,15 +8,9 @@ parameters:
run-all-workflows:
type: boolean
default: false
- run-python-workflow:
- type: boolean
- default: false
run-chat-workflow:
type: boolean
default: false
- run-ts-workflow:
- type: boolean
- default: false
job-macos-executor: &job-macos-executor
macos:
@@ -1266,25 +1260,6 @@ jobs:
paths:
- ../.ccache
- build-ts-docs:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - node/install:
- node-version: "18.16"
- - run: node --version
- - run: corepack enable
- - node/install-packages:
- pkg-manager: npm
- app-dir: gpt4all-bindings/typescript
- override-ci-command: npm install --ignore-scripts
- - run:
- name: build docs ts yo
- command: |
- cd gpt4all-bindings/typescript
- npm run docs:build
-
deploy-docs:
docker:
- image: circleci/python:3.8
@@ -1295,532 +1270,17 @@ jobs:
command: |
sudo apt-get update
sudo apt-get -y install python3 python3-pip
- sudo pip3 install awscli --upgrade
- sudo pip3 install mkdocs mkdocs-material mkautodoc 'mkdocstrings[python]' markdown-captions pillow cairosvg
+ sudo pip3 install -Ur requirements-docs.txt awscli
- run:
name: Make Documentation
- command: |
- cd gpt4all-bindings/python
- mkdocs build
+ command: mkdocs build
- run:
name: Deploy Documentation
- command: |
- cd gpt4all-bindings/python
- aws s3 sync --delete site/ s3://docs.gpt4all.io/
+ command: aws s3 sync --delete site/ s3://docs.gpt4all.io/
- run:
name: Invalidate docs.gpt4all.io cloudfront
command: aws cloudfront create-invalidation --distribution-id E1STQOW63QL2OH --paths "/*"
- build-py-linux:
- machine:
- image: ubuntu-2204:current
- steps:
- - checkout
- - restore_cache:
- keys:
- - ccache-gpt4all-linux-amd64-
- - run:
- <<: *job-linux-install-backend-deps
- - run:
- name: Build C library
- no_output_timeout: 30m
- command: |
- export PATH=$PATH:/usr/local/cuda/bin
- git submodule update --init --recursive
- ccache -o "cache_dir=${PWD}/../.ccache" -o max_size=500M -p -z
- cd gpt4all-backend
- cmake -B build -G Ninja \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_C_COMPILER=clang-19 \
- -DCMAKE_CXX_COMPILER=clang++-19 \
- -DCMAKE_CXX_COMPILER_AR=ar \
- -DCMAKE_CXX_COMPILER_RANLIB=ranlib \
- -DCMAKE_C_COMPILER_LAUNCHER=ccache \
- -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
- -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \
- -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON \
- -DCMAKE_CUDA_ARCHITECTURES='50-virtual;52-virtual;61-virtual;70-virtual;75-virtual'
- cmake --build build -j$(nproc)
- ccache -s
- - run:
- name: Build wheel
- command: |
- cd gpt4all-bindings/python/
- python setup.py bdist_wheel --plat-name=manylinux1_x86_64
- - store_artifacts:
- path: gpt4all-bindings/python/dist
- - save_cache:
- key: ccache-gpt4all-linux-amd64-{{ epoch }}
- when: always
- paths:
- - ../.ccache
- - persist_to_workspace:
- root: gpt4all-bindings/python/dist
- paths:
- - "*.whl"
-
- build-py-macos:
- <<: *job-macos-executor
- steps:
- - checkout
- - restore_cache:
- keys:
- - ccache-gpt4all-macos-
- - run:
- <<: *job-macos-install-deps
- - run:
- name: Install dependencies
- command: |
- pip install setuptools wheel cmake
- - run:
- name: Build C library
- no_output_timeout: 30m
- command: |
- git submodule update --init # don't use --recursive because macOS doesn't use Kompute
- ccache -o "cache_dir=${PWD}/../.ccache" -o max_size=500M -p -z
- cd gpt4all-backend
- cmake -B build \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_C_COMPILER=/opt/homebrew/opt/llvm/bin/clang \
- -DCMAKE_CXX_COMPILER=/opt/homebrew/opt/llvm/bin/clang++ \
- -DCMAKE_RANLIB=/usr/bin/ranlib \
- -DCMAKE_C_COMPILER_LAUNCHER=ccache \
- -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
- -DBUILD_UNIVERSAL=ON \
- -DCMAKE_OSX_DEPLOYMENT_TARGET=12.6 \
- -DGGML_METAL_MACOSX_VERSION_MIN=12.6
- cmake --build build --parallel
- ccache -s
- - run:
- name: Build wheel
- command: |
- cd gpt4all-bindings/python
- python setup.py bdist_wheel --plat-name=macosx_10_15_universal2
- - store_artifacts:
- path: gpt4all-bindings/python/dist
- - save_cache:
- key: ccache-gpt4all-macos-{{ epoch }}
- when: always
- paths:
- - ../.ccache
- - persist_to_workspace:
- root: gpt4all-bindings/python/dist
- paths:
- - "*.whl"
-
- build-py-windows:
- machine:
- image: windows-server-2022-gui:2024.04.1
- resource_class: windows.large
- shell: powershell.exe -ExecutionPolicy Bypass
- steps:
- - checkout
- - run:
- name: Update Submodules
- command: |
- git submodule sync
- git submodule update --init --recursive
- - restore_cache:
- keys:
- - ccache-gpt4all-win-amd64-
- - run:
- name: Install dependencies
- command:
- choco install -y ccache cmake ninja wget --installargs 'ADD_CMAKE_TO_PATH=System'
- - run:
- name: Install VulkanSDK
- command: |
- wget.exe "https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe"
- .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install
- - run:
- name: Install CUDA Toolkit
- command: |
- wget.exe "https://developer.download.nvidia.com/compute/cuda/11.8.0/network_installers/cuda_11.8.0_windows_network.exe"
- .\cuda_11.8.0_windows_network.exe -s cudart_11.8 nvcc_11.8 cublas_11.8 cublas_dev_11.8
- - run:
- name: Install Python dependencies
- command: pip install setuptools wheel cmake
- - run:
- name: Build C library
- no_output_timeout: 30m
- command: |
- $vsInstallPath = & "C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" -property installationpath
- Import-Module "${vsInstallPath}\Common7\Tools\Microsoft.VisualStudio.DevShell.dll"
- Enter-VsDevShell -VsInstallPath "$vsInstallPath" -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
-
- $Env:PATH += ";C:\VulkanSDK\1.3.261.1\bin"
- $Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1"
- ccache -o "cache_dir=${pwd}\..\.ccache" -o max_size=500M -p -z
- cd gpt4all-backend
- cmake -B build -G Ninja `
- -DCMAKE_BUILD_TYPE=Release `
- -DCMAKE_C_COMPILER_LAUNCHER=ccache `
- -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `
- -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache `
- -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON `
- -DCMAKE_CUDA_ARCHITECTURES='50-virtual;52-virtual;61-virtual;70-virtual;75-virtual'
- cmake --build build --parallel
- ccache -s
- - run:
- name: Build wheel
- command: |
- cd gpt4all-bindings/python
- python setup.py bdist_wheel --plat-name=win_amd64
- - store_artifacts:
- path: gpt4all-bindings/python/dist
- - save_cache:
- key: ccache-gpt4all-win-amd64-{{ epoch }}
- when: always
- paths:
- - ..\.ccache
- - persist_to_workspace:
- root: gpt4all-bindings/python/dist
- paths:
- - "*.whl"
-
- deploy-wheels:
- docker:
- - image: circleci/python:3.8
- steps:
- - setup_remote_docker
- - attach_workspace:
- at: /tmp/workspace
- - run:
- name: Install dependencies
- command: |
- sudo apt-get update
- sudo apt-get install -y build-essential cmake
- pip install setuptools wheel twine
- - run:
- name: Upload Python package
- command: |
- twine upload /tmp/workspace/*.whl --username __token__ --password $PYPI_CRED
- - store_artifacts:
- path: /tmp/workspace
-
- build-bindings-backend-linux:
- machine:
- image: ubuntu-2204:current
- steps:
- - checkout
- - run:
- name: Update Submodules
- command: |
- git submodule sync
- git submodule update --init --recursive
- - restore_cache:
- keys:
- - ccache-gpt4all-linux-amd64-
- - run:
- <<: *job-linux-install-backend-deps
- - run:
- name: Build Libraries
- no_output_timeout: 30m
- command: |
- export PATH=$PATH:/usr/local/cuda/bin
- ccache -o "cache_dir=${PWD}/../.ccache" -o max_size=500M -p -z
- cd gpt4all-backend
- mkdir -p runtimes/build
- cd runtimes/build
- cmake ../.. -G Ninja \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_C_COMPILER=clang-19 \
- -DCMAKE_CXX_COMPILER=clang++-19 \
- -DCMAKE_CXX_COMPILER_AR=ar \
- -DCMAKE_CXX_COMPILER_RANLIB=ranlib \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_C_COMPILER_LAUNCHER=ccache \
- -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
- -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache \
- -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON
- cmake --build . -j$(nproc)
- ccache -s
- mkdir ../linux-x64
- cp -L *.so ../linux-x64 # otherwise persist_to_workspace seems to mess symlinks
- - save_cache:
- key: ccache-gpt4all-linux-amd64-{{ epoch }}
- when: always
- paths:
- - ../.ccache
- - persist_to_workspace:
- root: gpt4all-backend
- paths:
- - runtimes/linux-x64/*.so
-
- build-bindings-backend-macos:
- <<: *job-macos-executor
- steps:
- - checkout
- - run:
- name: Update Submodules
- command: |
- git submodule sync
- git submodule update --init --recursive
- - restore_cache:
- keys:
- - ccache-gpt4all-macos-
- - run:
- <<: *job-macos-install-deps
- - run:
- name: Build Libraries
- no_output_timeout: 30m
- command: |
- ccache -o "cache_dir=${PWD}/../.ccache" -o max_size=500M -p -z
- cd gpt4all-backend
- mkdir -p runtimes/build
- cd runtimes/build
- cmake ../.. \
- -DCMAKE_BUILD_TYPE=Release \
- -DCMAKE_C_COMPILER=/opt/homebrew/opt/llvm/bin/clang \
- -DCMAKE_CXX_COMPILER=/opt/homebrew/opt/llvm/bin/clang++ \
- -DCMAKE_RANLIB=/usr/bin/ranlib \
- -DCMAKE_C_COMPILER_LAUNCHER=ccache \
- -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
- -DBUILD_UNIVERSAL=ON \
- -DCMAKE_OSX_DEPLOYMENT_TARGET=12.6 \
- -DGGML_METAL_MACOSX_VERSION_MIN=12.6
- cmake --build . --parallel
- ccache -s
- mkdir ../osx-x64
- cp -L *.dylib ../osx-x64
- cp ../../llama.cpp-mainline/*.metal ../osx-x64
- ls ../osx-x64
- - save_cache:
- key: ccache-gpt4all-macos-{{ epoch }}
- when: always
- paths:
- - ../.ccache
- - persist_to_workspace:
- root: gpt4all-backend
- paths:
- - runtimes/osx-x64/*.dylib
- - runtimes/osx-x64/*.metal
-
- build-bindings-backend-windows:
- machine:
- image: windows-server-2022-gui:2024.04.1
- resource_class: windows.large
- shell: powershell.exe -ExecutionPolicy Bypass
- steps:
- - checkout
- - run:
- name: Update Submodules
- command: |
- git submodule sync
- git submodule update --init --recursive
- - restore_cache:
- keys:
- - ccache-gpt4all-win-amd64-
- - run:
- name: Install dependencies
- command: |
- choco install -y ccache cmake ninja wget --installargs 'ADD_CMAKE_TO_PATH=System'
- - run:
- name: Install VulkanSDK
- command: |
- wget.exe "https://sdk.lunarg.com/sdk/download/1.3.261.1/windows/VulkanSDK-1.3.261.1-Installer.exe"
- .\VulkanSDK-1.3.261.1-Installer.exe --accept-licenses --default-answer --confirm-command install
- - run:
- name: Install CUDA Toolkit
- command: |
- wget.exe "https://developer.download.nvidia.com/compute/cuda/11.8.0/network_installers/cuda_11.8.0_windows_network.exe"
- .\cuda_11.8.0_windows_network.exe -s cudart_11.8 nvcc_11.8 cublas_11.8 cublas_dev_11.8
- - run:
- name: Build Libraries
- no_output_timeout: 30m
- command: |
- $vsInstallPath = & "C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" -property installationpath
- Import-Module "${vsInstallPath}\Common7\Tools\Microsoft.VisualStudio.DevShell.dll"
- Enter-VsDevShell -VsInstallPath "$vsInstallPath" -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
-
- $Env:Path += ";C:\VulkanSDK\1.3.261.1\bin"
- $Env:VULKAN_SDK = "C:\VulkanSDK\1.3.261.1"
- ccache -o "cache_dir=${pwd}\..\.ccache" -o max_size=500M -p -z
- cd gpt4all-backend
- mkdir runtimes/win-x64_msvc
- cd runtimes/win-x64_msvc
- cmake -S ../.. -B . -G Ninja `
- -DCMAKE_BUILD_TYPE=Release `
- -DCMAKE_C_COMPILER_LAUNCHER=ccache `
- -DCMAKE_CXX_COMPILER_LAUNCHER=ccache `
- -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache `
- -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON
- cmake --build . --parallel
- ccache -s
- cp bin/Release/*.dll .
- - save_cache:
- key: ccache-gpt4all-win-amd64-{{ epoch }}
- when: always
- paths:
- - ..\.ccache
- - persist_to_workspace:
- root: gpt4all-backend
- paths:
- - runtimes/win-x64_msvc/*.dll
-
- build-nodejs-linux:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - attach_workspace:
- at: /tmp/gpt4all-backend
- - node/install:
- install-yarn: true
- node-version: "18.16"
- - run: node --version
- - run: corepack enable
- - node/install-packages:
- app-dir: gpt4all-bindings/typescript
- pkg-manager: yarn
- override-ci-command: yarn install
- - run:
- command: |
- cd gpt4all-bindings/typescript
- yarn prebuildify -t 18.16.0 --napi
- - run:
- command: |
- mkdir -p gpt4all-backend/prebuilds/linux-x64
- mkdir -p gpt4all-backend/runtimes/linux-x64
- cp /tmp/gpt4all-backend/runtimes/linux-x64/*-*.so gpt4all-backend/runtimes/linux-x64
- cp gpt4all-bindings/typescript/prebuilds/linux-x64/*.node gpt4all-backend/prebuilds/linux-x64
- - persist_to_workspace:
- root: gpt4all-backend
- paths:
- - prebuilds/linux-x64/*.node
- - runtimes/linux-x64/*-*.so
-
- build-nodejs-macos:
- <<: *job-macos-executor
- steps:
- - checkout
- - attach_workspace:
- at: /tmp/gpt4all-backend
- - node/install:
- install-yarn: true
- node-version: "18.16"
- - run: node --version
- - run: corepack enable
- - node/install-packages:
- app-dir: gpt4all-bindings/typescript
- pkg-manager: yarn
- override-ci-command: yarn install
- - run:
- command: |
- cd gpt4all-bindings/typescript
- yarn prebuildify -t 18.16.0 --napi
- - run:
- name: "Persisting all necessary things to workspace"
- command: |
- mkdir -p gpt4all-backend/prebuilds/darwin-x64
- mkdir -p gpt4all-backend/runtimes/darwin
- cp /tmp/gpt4all-backend/runtimes/osx-x64/*-*.* gpt4all-backend/runtimes/darwin
- cp gpt4all-bindings/typescript/prebuilds/darwin-x64/*.node gpt4all-backend/prebuilds/darwin-x64
- - persist_to_workspace:
- root: gpt4all-backend
- paths:
- - prebuilds/darwin-x64/*.node
- - runtimes/darwin/*-*.*
-
- build-nodejs-windows:
- executor:
- name: win/default
- size: large
- shell: powershell.exe -ExecutionPolicy Bypass
- steps:
- - checkout
- - attach_workspace:
- at: /tmp/gpt4all-backend
- - run: choco install wget -y
- - run:
- command: |
- wget.exe "https://nodejs.org/dist/v18.16.0/node-v18.16.0-x86.msi" -P C:\Users\circleci\Downloads\
- MsiExec.exe /i C:\Users\circleci\Downloads\node-v18.16.0-x86.msi /qn
- - run:
- command: |
- Start-Process powershell -verb runAs -Args "-start GeneralProfile"
- nvm install 18.16.0
- nvm use 18.16.0
- - run: node --version
- - run: corepack enable
- - run:
- command: |
- npm install -g yarn
- cd gpt4all-bindings/typescript
- yarn install
- - run:
- command: |
- cd gpt4all-bindings/typescript
- yarn prebuildify -t 18.16.0 --napi
- - run:
- command: |
- mkdir -p gpt4all-backend/prebuilds/win32-x64
- mkdir -p gpt4all-backend/runtimes/win32-x64
- cp /tmp/gpt4all-backend/runtimes/win-x64_msvc/*-*.dll gpt4all-backend/runtimes/win32-x64
- cp gpt4all-bindings/typescript/prebuilds/win32-x64/*.node gpt4all-backend/prebuilds/win32-x64
-
- - persist_to_workspace:
- root: gpt4all-backend
- paths:
- - prebuilds/win32-x64/*.node
- - runtimes/win32-x64/*-*.dll
-
- deploy-npm-pkg:
- docker:
- - image: cimg/base:stable
- steps:
- - attach_workspace:
- at: /tmp/gpt4all-backend
- - checkout
- - node/install:
- install-yarn: true
- node-version: "18.16"
- - run: node --version
- - run: corepack enable
- - run:
- command: |
- cd gpt4all-bindings/typescript
- # excluding llmodel. Node.js bindings don't need llmodel.dll
- mkdir -p runtimes/win32-x64/native
- mkdir -p prebuilds/win32-x64/
- cp /tmp/gpt4all-backend/runtimes/win-x64_msvc/*-*.dll runtimes/win32-x64/native/
- cp /tmp/gpt4all-backend/prebuilds/win32-x64/*.node prebuilds/win32-x64/
-
- mkdir -p runtimes/linux-x64/native
- mkdir -p prebuilds/linux-x64/
- cp /tmp/gpt4all-backend/runtimes/linux-x64/*-*.so runtimes/linux-x64/native/
- cp /tmp/gpt4all-backend/prebuilds/linux-x64/*.node prebuilds/linux-x64/
-
- # darwin has universal runtime libraries
- mkdir -p runtimes/darwin/native
- mkdir -p prebuilds/darwin-x64/
-
- cp /tmp/gpt4all-backend/runtimes/darwin/*-*.* runtimes/darwin/native/
-
- cp /tmp/gpt4all-backend/prebuilds/darwin-x64/*.node prebuilds/darwin-x64/
-
- # Fallback build if user is not on above prebuilds
- mv -f binding.ci.gyp binding.gyp
-
- mkdir gpt4all-backend
- cd ../../gpt4all-backend
- mv llmodel.h llmodel.cpp llmodel_c.cpp llmodel_c.h sysinfo.h dlhandle.h ../gpt4all-bindings/typescript/gpt4all-backend/
-
- # Test install
- - node/install-packages:
- app-dir: gpt4all-bindings/typescript
- pkg-manager: yarn
- override-ci-command: yarn install
- - run:
- command: |
- cd gpt4all-bindings/typescript
- yarn run test
- - run:
- command: |
- cd gpt4all-bindings/typescript
- npm set //registry.npmjs.org/:_authToken=$NPM_TOKEN
- npm publish
-
# only run a job on the main branch
job_only_main: &job_only_main
filters:
@@ -1849,8 +1309,6 @@ workflows:
not:
or:
- << pipeline.parameters.run-all-workflows >>
- - << pipeline.parameters.run-python-workflow >>
- - << pipeline.parameters.run-ts-workflow >>
- << pipeline.parameters.run-chat-workflow >>
- equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]
jobs:
@@ -2079,87 +1537,9 @@ workflows:
when:
and:
- equal: [ << pipeline.git.branch >>, main ]
- - or:
- - << pipeline.parameters.run-all-workflows >>
- - << pipeline.parameters.run-python-workflow >>
+ - << pipeline.parameters.run-all-workflows >>
- not:
equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]
jobs:
- deploy-docs:
context: gpt4all
- build-python:
- when:
- and:
- - or: [ << pipeline.parameters.run-all-workflows >>, << pipeline.parameters.run-python-workflow >> ]
- - not:
- equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]
- jobs:
- - pypi-hold:
- <<: *job_only_main
- type: approval
- - hold:
- type: approval
- - build-py-linux:
- requires:
- - hold
- - build-py-macos:
- requires:
- - hold
- - build-py-windows:
- requires:
- - hold
- - deploy-wheels:
- <<: *job_only_main
- context: gpt4all
- requires:
- - pypi-hold
- - build-py-windows
- - build-py-linux
- - build-py-macos
- build-bindings:
- when:
- and:
- - or: [ << pipeline.parameters.run-all-workflows >>, << pipeline.parameters.run-ts-workflow >> ]
- - not:
- equal: [ << pipeline.trigger_source >>, scheduled_pipeline ]
- jobs:
- - backend-hold:
- type: approval
- - nodejs-hold:
- type: approval
- - npm-hold:
- <<: *job_only_main
- type: approval
- - docs-hold:
- type: approval
- - build-bindings-backend-linux:
- requires:
- - backend-hold
- - build-bindings-backend-macos:
- requires:
- - backend-hold
- - build-bindings-backend-windows:
- requires:
- - backend-hold
- - build-nodejs-linux:
- requires:
- - nodejs-hold
- - build-bindings-backend-linux
- - build-nodejs-windows:
- requires:
- - nodejs-hold
- - build-bindings-backend-windows
- - build-nodejs-macos:
- requires:
- - nodejs-hold
- - build-bindings-backend-macos
- - build-ts-docs:
- requires:
- - docs-hold
- - deploy-npm-pkg:
- <<: *job_only_main
- requires:
- - npm-hold
- - build-nodejs-linux
- - build-nodejs-windows
- - build-nodejs-macos
diff --git a/.github/ISSUE_TEMPLATE/bindings-bug.md b/.github/ISSUE_TEMPLATE/bindings-bug.md
deleted file mode 100644
index cbf0d49d..00000000
--- a/.github/ISSUE_TEMPLATE/bindings-bug.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-name: "\U0001F6E0 Bindings Bug Report"
-about: A bug report for the GPT4All Bindings
-labels: ["bindings", "bug-unconfirmed"]
----
-
-
-
-### Bug Report
-
-
-
-### Example Code
-
-
-
-### Steps to Reproduce
-
-
-
-1.
-2.
-3.
-
-### Expected Behavior
-
-
-
-### Your Environment
-
-- Bindings version (e.g. "Version" from `pip show gpt4all`):
-- Operating System:
-- Chat model used (if applicable):
-
-
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 6838a0b8..6907aa1e 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -29,13 +29,6 @@ Jared Van Bortel ([@cebtenzzre](https://github.com/cebtenzzre))
E-mail: jared@nomic.ai
Discord: `@cebtenzzre`
- gpt4all-backend
-- Python binding
-- Python CLI app
-
-Jacob Nguyen ([@jacoobes](https://github.com/jacoobes))
-Discord: `@jacoobes`
-E-mail: `jacoobes@sern.dev`
-- TypeScript binding
Dominik ([@cosmic-snow](https://github.com/cosmic-snow))
E-mail: cosmic-snow@mailfence.com
@@ -45,7 +38,7 @@ Discord: `@cosmic__snow`
Max Cembalest ([@mcembalest](https://github.com/mcembalest))
E-mail: max@nomic.ai
Discord: `@maxcembalest.`
-- Official documentation (gpt4all-bindings/python/docs -> https://docs.gpt4all.io/)
+- Official documentation (docs -> https://docs.gpt4all.io/)
Thiago Ramos ([@thiagojramos](https://github.com/thiagojramos))
E-mail: thiagojramos@outlook.com
diff --git a/README.md b/README.md
index 4450198c..13c812d1 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ GPT4All is made possible by our compute partner
-
Windows Installer
+
Windows Installer
—
@@ -42,12 +42,12 @@ GPT4All is made possible by our compute partner
-
macOS Installer
+
macOS Installer
—
—
-
Ubuntu Installer
+
Ubuntu Installer
—
@@ -74,24 +74,6 @@ See the full [System Requirements](gpt4all-chat/system_requirements.md) for more
-## Install GPT4All Python
-
-`gpt4all` gives you access to LLMs with our Python client around [`llama.cpp`](https://github.com/ggerganov/llama.cpp) implementations.
-
-Nomic contributes to open source software like [`llama.cpp`](https://github.com/ggerganov/llama.cpp) to make LLMs accessible and efficient **for all**.
-
-```bash
-pip install gpt4all
-```
-
-```python
-from gpt4all import GPT4All
-model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf") # downloads / loads a 4.66GB LLM
-with model.chat_session():
- print(model.generate("How can I run LLMs efficiently on my laptop?", max_tokens=1024))
-```
-
-
## Integrations
:parrot::link: [Langchain](https://python.langchain.com/v0.2/docs/integrations/providers/gpt4all/)
@@ -119,7 +101,7 @@ Please see CONTRIBUTING.md and follow the issues, bug reports, and PR markdown t
Check the project Discord, with project owners, or through existing issues/PRs to avoid duplicate work.
Please make sure to tag all of the above with relevant project identifiers or your contribution could potentially get lost.
-Example tags: `backend`, `bindings`, `python-bindings`, `documentation`, etc.
+Example tags: `backend`, `documentation`, etc.
## Citation
diff --git a/gpt4all-bindings/python/docs/assets/add.png b/docs/assets/add.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/add.png
rename to docs/assets/add.png
diff --git a/gpt4all-bindings/python/docs/assets/add_model_gpt4.png b/docs/assets/add_model_gpt4.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/add_model_gpt4.png
rename to docs/assets/add_model_gpt4.png
diff --git a/gpt4all-bindings/python/docs/assets/attach_spreadsheet.png b/docs/assets/attach_spreadsheet.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/attach_spreadsheet.png
rename to docs/assets/attach_spreadsheet.png
diff --git a/gpt4all-bindings/python/docs/assets/baelor.png b/docs/assets/baelor.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/baelor.png
rename to docs/assets/baelor.png
diff --git a/gpt4all-bindings/python/docs/assets/before_first_chat.png b/docs/assets/before_first_chat.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/before_first_chat.png
rename to docs/assets/before_first_chat.png
diff --git a/gpt4all-bindings/python/docs/assets/chat_window.png b/docs/assets/chat_window.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/chat_window.png
rename to docs/assets/chat_window.png
diff --git a/gpt4all-bindings/python/docs/assets/closed_chat_panel.png b/docs/assets/closed_chat_panel.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/closed_chat_panel.png
rename to docs/assets/closed_chat_panel.png
diff --git a/gpt4all-bindings/python/docs/assets/configure_doc_collection.png b/docs/assets/configure_doc_collection.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/configure_doc_collection.png
rename to docs/assets/configure_doc_collection.png
diff --git a/gpt4all-bindings/python/docs/assets/disney_spreadsheet.png b/docs/assets/disney_spreadsheet.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/disney_spreadsheet.png
rename to docs/assets/disney_spreadsheet.png
diff --git a/gpt4all-bindings/python/docs/assets/download.png b/docs/assets/download.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/download.png
rename to docs/assets/download.png
diff --git a/gpt4all-bindings/python/docs/assets/download_llama.png b/docs/assets/download_llama.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/download_llama.png
rename to docs/assets/download_llama.png
diff --git a/gpt4all-bindings/python/docs/assets/explore.png b/docs/assets/explore.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/explore.png
rename to docs/assets/explore.png
diff --git a/gpt4all-bindings/python/docs/assets/explore_models.png b/docs/assets/explore_models.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/explore_models.png
rename to docs/assets/explore_models.png
diff --git a/gpt4all-bindings/python/docs/assets/favicon.ico b/docs/assets/favicon.ico
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/favicon.ico
rename to docs/assets/favicon.ico
diff --git a/gpt4all-bindings/python/docs/assets/good_tyrion.png b/docs/assets/good_tyrion.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/good_tyrion.png
rename to docs/assets/good_tyrion.png
diff --git a/gpt4all-bindings/python/docs/assets/got_docs_ready.png b/docs/assets/got_docs_ready.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/got_docs_ready.png
rename to docs/assets/got_docs_ready.png
diff --git a/gpt4all-bindings/python/docs/assets/got_done.png b/docs/assets/got_done.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/got_done.png
rename to docs/assets/got_done.png
diff --git a/gpt4all-bindings/python/docs/assets/gpt4all_home.png b/docs/assets/gpt4all_home.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/gpt4all_home.png
rename to docs/assets/gpt4all_home.png
diff --git a/gpt4all-bindings/python/docs/assets/gpt4all_xlsx_attachment.mp4 b/docs/assets/gpt4all_xlsx_attachment.mp4
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/gpt4all_xlsx_attachment.mp4
rename to docs/assets/gpt4all_xlsx_attachment.mp4
diff --git a/gpt4all-bindings/python/docs/assets/installed_models.png b/docs/assets/installed_models.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/installed_models.png
rename to docs/assets/installed_models.png
diff --git a/gpt4all-bindings/python/docs/assets/linux.png b/docs/assets/linux.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/linux.png
rename to docs/assets/linux.png
diff --git a/gpt4all-bindings/python/docs/assets/local_embed.gif b/docs/assets/local_embed.gif
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/local_embed.gif
rename to docs/assets/local_embed.gif
diff --git a/gpt4all-bindings/python/docs/assets/mac.png b/docs/assets/mac.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/mac.png
rename to docs/assets/mac.png
diff --git a/gpt4all-bindings/python/docs/assets/models_page_icon.png b/docs/assets/models_page_icon.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/models_page_icon.png
rename to docs/assets/models_page_icon.png
diff --git a/gpt4all-bindings/python/docs/assets/new_docs_annotated.png b/docs/assets/new_docs_annotated.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/new_docs_annotated.png
rename to docs/assets/new_docs_annotated.png
diff --git a/gpt4all-bindings/python/docs/assets/new_docs_annotated_filled.png b/docs/assets/new_docs_annotated_filled.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/new_docs_annotated_filled.png
rename to docs/assets/new_docs_annotated_filled.png
diff --git a/gpt4all-bindings/python/docs/assets/new_first_chat.png b/docs/assets/new_first_chat.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/new_first_chat.png
rename to docs/assets/new_first_chat.png
diff --git a/gpt4all-bindings/python/docs/assets/no_docs.png b/docs/assets/no_docs.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/no_docs.png
rename to docs/assets/no_docs.png
diff --git a/gpt4all-bindings/python/docs/assets/no_models.png b/docs/assets/no_models.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/no_models.png
rename to docs/assets/no_models.png
diff --git a/gpt4all-bindings/python/docs/assets/no_models_tiny.png b/docs/assets/no_models_tiny.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/no_models_tiny.png
rename to docs/assets/no_models_tiny.png
diff --git a/gpt4all-bindings/python/docs/assets/nomic.png b/docs/assets/nomic.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/nomic.png
rename to docs/assets/nomic.png
diff --git a/gpt4all-bindings/python/docs/assets/obsidian_adding_collection.png b/docs/assets/obsidian_adding_collection.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/obsidian_adding_collection.png
rename to docs/assets/obsidian_adding_collection.png
diff --git a/gpt4all-bindings/python/docs/assets/obsidian_docs.png b/docs/assets/obsidian_docs.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/obsidian_docs.png
rename to docs/assets/obsidian_docs.png
diff --git a/gpt4all-bindings/python/docs/assets/obsidian_response.png b/docs/assets/obsidian_response.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/obsidian_response.png
rename to docs/assets/obsidian_response.png
diff --git a/gpt4all-bindings/python/docs/assets/obsidian_sources.png b/docs/assets/obsidian_sources.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/obsidian_sources.png
rename to docs/assets/obsidian_sources.png
diff --git a/gpt4all-bindings/python/docs/assets/open_chat_panel.png b/docs/assets/open_chat_panel.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/open_chat_panel.png
rename to docs/assets/open_chat_panel.png
diff --git a/gpt4all-bindings/python/docs/assets/open_local_docs.png b/docs/assets/open_local_docs.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/open_local_docs.png
rename to docs/assets/open_local_docs.png
diff --git a/gpt4all-bindings/python/docs/assets/open_sources.png b/docs/assets/open_sources.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/open_sources.png
rename to docs/assets/open_sources.png
diff --git a/gpt4all-bindings/python/docs/assets/osbsidian_user_interaction.png b/docs/assets/osbsidian_user_interaction.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/osbsidian_user_interaction.png
rename to docs/assets/osbsidian_user_interaction.png
diff --git a/gpt4all-bindings/python/docs/assets/search_mistral.png b/docs/assets/search_mistral.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/search_mistral.png
rename to docs/assets/search_mistral.png
diff --git a/gpt4all-bindings/python/docs/assets/search_settings.png b/docs/assets/search_settings.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/search_settings.png
rename to docs/assets/search_settings.png
diff --git a/gpt4all-bindings/python/docs/assets/spreadsheet_chat.png b/docs/assets/spreadsheet_chat.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/spreadsheet_chat.png
rename to docs/assets/spreadsheet_chat.png
diff --git a/gpt4all-bindings/python/docs/assets/syrio_snippets.png b/docs/assets/syrio_snippets.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/syrio_snippets.png
rename to docs/assets/syrio_snippets.png
diff --git a/gpt4all-bindings/python/docs/assets/three_model_options.png b/docs/assets/three_model_options.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/three_model_options.png
rename to docs/assets/three_model_options.png
diff --git a/gpt4all-bindings/python/docs/assets/ubuntu.svg b/docs/assets/ubuntu.svg
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/ubuntu.svg
rename to docs/assets/ubuntu.svg
diff --git a/gpt4all-bindings/python/docs/assets/windows.png b/docs/assets/windows.png
similarity index 100%
rename from gpt4all-bindings/python/docs/assets/windows.png
rename to docs/assets/windows.png
diff --git a/gpt4all-bindings/python/docs/css/custom.css b/docs/css/custom.css
similarity index 100%
rename from gpt4all-bindings/python/docs/css/custom.css
rename to docs/css/custom.css
diff --git a/gpt4all-bindings/python/docs/gpt4all_api_server/home.md b/docs/gpt4all_api_server/home.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_api_server/home.md
rename to docs/gpt4all_api_server/home.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/chat_templates.md b/docs/gpt4all_desktop/chat_templates.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/chat_templates.md
rename to docs/gpt4all_desktop/chat_templates.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/chats.md b/docs/gpt4all_desktop/chats.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/chats.md
rename to docs/gpt4all_desktop/chats.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md b/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md
similarity index 82%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md
rename to docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md
index 2660c38d..1bf128d0 100644
--- a/gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md
+++ b/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-Obsidian.md
@@ -46,7 +46,7 @@ Obsidian for Desktop is a powerful management and note-taking software designed
-
+
|
@@ -65,7 +65,7 @@ Obsidian for Desktop is a powerful management and note-taking software designed
-
+
|
@@ -76,7 +76,7 @@ Obsidian for Desktop is a powerful management and note-taking software designed
-
+
|
@@ -84,7 +84,7 @@ Obsidian for Desktop is a powerful management and note-taking software designed
-
+
|
@@ -96,7 +96,7 @@ Obsidian for Desktop is a powerful management and note-taking software designed
-
+
|
@@ -104,6 +104,3 @@ Obsidian for Desktop is a powerful management and note-taking software designed
## How It Works
Obsidian for Desktop syncs your Obsidian notes to your computer, while LocalDocs integrates these files into your LLM chats using embedding models. These models find semantically similar snippets from your files to enhance the context of your interactions.
-
-To learn more about embedding models and explore further, refer to the [Nomic Python SDK documentation](https://docs.nomic.ai/atlas/capabilities/embeddings).
-
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-One-Drive.md b/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-One-Drive.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-One-Drive.md
rename to docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-One-Drive.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-google-drive.md b/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-google-drive.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-google-drive.md
rename to docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-google-drive.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-microsoft-excel.md b/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-microsoft-excel.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-microsoft-excel.md
rename to docs/gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-microsoft-excel.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/localdocs.md b/docs/gpt4all_desktop/localdocs.md
similarity index 93%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/localdocs.md
rename to docs/gpt4all_desktop/localdocs.md
index c3290a92..906279ad 100644
--- a/gpt4all-bindings/python/docs/gpt4all_desktop/localdocs.md
+++ b/docs/gpt4all_desktop/localdocs.md
@@ -44,5 +44,3 @@ LocalDocs brings the information you have from files on-device into your LLM cha
## How It Works
A LocalDocs collection uses Nomic AI's free and fast on-device embedding models to index your folder into text snippets that each get an **embedding vector**. These vectors allow us to find snippets from your files that are semantically similar to the questions and prompts you enter in your chats. We then include those semantically similar snippets in the prompt to the LLM.
-
-To try the embedding models yourself, we recommend using the [Nomic Python SDK](https://docs.nomic.ai/atlas/capabilities/embeddings)
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/models.md b/docs/gpt4all_desktop/models.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/models.md
rename to docs/gpt4all_desktop/models.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/quickstart.md b/docs/gpt4all_desktop/quickstart.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/quickstart.md
rename to docs/gpt4all_desktop/quickstart.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_desktop/settings.md b/docs/gpt4all_desktop/settings.md
similarity index 100%
rename from gpt4all-bindings/python/docs/gpt4all_desktop/settings.md
rename to docs/gpt4all_desktop/settings.md
diff --git a/gpt4all-bindings/python/docs/gpt4all_help/faq.md b/docs/gpt4all_help/faq.md
similarity index 51%
rename from gpt4all-bindings/python/docs/gpt4all_help/faq.md
rename to docs/gpt4all_help/faq.md
index c94b0d04..eb12bb10 100644
--- a/gpt4all-bindings/python/docs/gpt4all_help/faq.md
+++ b/docs/gpt4all_help/faq.md
@@ -6,32 +6,16 @@
We support models with a `llama.cpp` implementation which have been uploaded to [HuggingFace](https://huggingface.co/).
-### Which embedding models are supported?
-
-We support SBert and Nomic Embed Text v1 & v1.5.
-
## Software
### What software do I need?
All you need is to [install GPT4All](../index.md) onto your Windows, Mac, or Linux computer.
-### Which SDK languages are supported?
-
-Our SDK is in Python for usability, but these are light bindings around [`llama.cpp`](https://github.com/ggerganov/llama.cpp) implementations that we contribute to for efficiency and accessibility on everyday computers.
-
### Is there an API?
Yes, you can run your model in server-mode with our [OpenAI-compatible API](https://platform.openai.com/docs/api-reference/completions), which you can configure in [settings](../gpt4all_desktop/settings.md#application-settings)
-### Can I monitor a GPT4All deployment?
-
-Yes, GPT4All [integrates](../gpt4all_python/monitoring.md) with [OpenLIT](https://github.com/openlit/openlit) so you can deploy LLMs with user interactions and hardware usage automatically monitored for full observability.
-
-### Is there a command line interface (CLI)?
-
-[Yes](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/cli), we provide a lightweight CLI built on the Python client. We welcome further contributions!
-
## Hardware
### What hardware do I need?
diff --git a/gpt4all-bindings/python/docs/gpt4all_help/troubleshooting.md b/docs/gpt4all_help/troubleshooting.md
similarity index 97%
rename from gpt4all-bindings/python/docs/gpt4all_help/troubleshooting.md
rename to docs/gpt4all_help/troubleshooting.md
index ba132616..da5ac261 100644
--- a/gpt4all-bindings/python/docs/gpt4all_help/troubleshooting.md
+++ b/docs/gpt4all_help/troubleshooting.md
@@ -2,7 +2,7 @@
## Error Loading Models
-It is possible you are trying to load a model from HuggingFace whose weights are not compatible with our [backend](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings).
+It is possible you are trying to load a model from HuggingFace whose weights are not compatible with our [backend](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-backend).
Try downloading one of the officially supported models listed on the main models page in the application. If the problem persists, please share your experience on our [Discord](https://discord.com/channels/1076964370942267462).
diff --git a/gpt4all-bindings/python/docs/index.md b/docs/index.md
similarity index 54%
rename from gpt4all-bindings/python/docs/index.md
rename to docs/index.md
index 0b200bf4..19b9ef73 100644
--- a/gpt4all-bindings/python/docs/index.md
+++ b/docs/index.md
@@ -12,17 +12,3 @@ No API calls or GPUs required - you can just download the application and [get s
[Download for Mac](https://gpt4all.io/installers/gpt4all-installer-darwin.dmg)
[Download for Linux](https://gpt4all.io/installers/gpt4all-installer-linux.run)
-
-!!! note "Python SDK"
- Use GPT4All in Python to program with LLMs implemented with the [`llama.cpp`](https://github.com/ggerganov/llama.cpp) backend and [Nomic's C backend](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-backend). Nomic contributes to open source software like [`llama.cpp`](https://github.com/ggerganov/llama.cpp) to make LLMs accessible and efficient **for all**.
-
- ```bash
- pip install gpt4all
- ```
-
- ```python
- from gpt4all import GPT4All
- model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf") # downloads / loads a 4.66GB LLM
- with model.chat_session():
- print(model.generate("How can I run LLMs efficiently on my laptop?", max_tokens=1024))
- ```
diff --git a/gpt4all-bindings/python/docs/old/gpt4all_chat.md b/docs/old/gpt4all_chat.md
similarity index 100%
rename from gpt4all-bindings/python/docs/old/gpt4all_chat.md
rename to docs/old/gpt4all_chat.md
diff --git a/gpt4all-bindings/README.md b/gpt4all-bindings/README.md
deleted file mode 100644
index 722159fd..00000000
--- a/gpt4all-bindings/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# GPT4All Language Bindings
-These are the language bindings for the GPT4All backend. They provide functionality to load GPT4All models (and other llama.cpp models), generate text, and (in the case of the Python bindings) embed text as a vector representation.
-
-See their respective folders for language-specific documentation.
-
-### Languages
-- [Python](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python) (Nomic official, maintained by [@cebtenzzre](https://github.com/cebtenzzre))
-- [Node.js/Typescript](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/typescript) (community, maintained by [@jacoobes](https://github.com/jacoobes) and [@iimez](https://github.com/iimez))
-
-
-
-
-Archived Bindings
-
-
-The following bindings have been removed from this repository due to lack of maintenance. If adopted, they can be brought back—feel free to message a developer on Discord if you are interested in maintaining one of them. Below are links to their last available version (not necessarily the last working version).
-- C#: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/csharp)
-- Java: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/java)
-- Go: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/golang)
-
-
diff --git a/gpt4all-bindings/cli/README.md b/gpt4all-bindings/cli/README.md
deleted file mode 100644
index f0d1e57e..00000000
--- a/gpt4all-bindings/cli/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# GPT4All Command-Line Interface (CLI)
-
-GPT4All on the command-line.
-
-More details on the [wiki](https://github.com/nomic-ai/gpt4all/wiki/Python-CLI).
-
-## Quickstart
-
-The CLI is based on the `gpt4all` Python bindings and the `typer` package.
-
-The following shows one way to get started with the CLI; the documentation has more information.
-Typically, you will want to replace `python` with `python3` on _Unix-like_ systems and `py -3` on
-_Windows_. Also, it's assumed you have all the necessary Python components already installed.
-
-The CLI is a self-contained Python script named [app.py] ([download][app.py-download]). As long as
-its package dependencies are present, you can download and run it from wherever you like.
-
-[app.py]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-bindings/cli/app.py
-[app.py-download]: https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-bindings/cli/app.py
-
-```shell
-# optional but recommended: create and use a virtual environment
-python -m venv gpt4all-cli
-```
-_Windows_ and _Unix-like_ systems differ slightly in how you activate a _virtual environment_:
-- _Unix-like_, typically: `. gpt4all-cli/bin/activate`
-- _Windows_: `gpt4all-cli\Scripts\activate`
-
-Then:
-```shell
-# pip-install the necessary packages; omit '--user' if using a virtual environment
-python -m pip install --user --upgrade gpt4all typer
-# run the CLI
-python app.py repl
-```
-By default, it will automatically download the `Mistral Instruct` model to `.cache/gpt4all/` in your
-user directory, if necessary.
-
-If you have already saved a model beforehand, specify its path with the `-m`/`--model` argument,
-for example:
-```shell
-python app.py repl --model /home/user/my-gpt4all-models/mistral-7b-instruct-v0.1.Q4_0.gguf
-```
diff --git a/gpt4all-bindings/cli/app.py b/gpt4all-bindings/cli/app.py
deleted file mode 100755
index be6b5745..00000000
--- a/gpt4all-bindings/cli/app.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python3
-"""GPT4All CLI
-
-The GPT4All CLI is a self-contained script based on the `gpt4all` and `typer` packages. It offers a
-REPL to communicate with a language model similar to the chat GUI application, but more basic.
-"""
-
-import importlib.metadata
-import io
-import sys
-from collections import namedtuple
-from typing_extensions import Annotated
-
-import typer
-from gpt4all import GPT4All
-
-
-MESSAGES = [
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello there."},
- {"role": "assistant", "content": "Hi, how can I help you?"},
-]
-
-SPECIAL_COMMANDS = {
- "/reset": lambda messages: messages.clear(),
- "/exit": lambda _: sys.exit(),
- "/clear": lambda _: print("\n" * 100),
- "/help": lambda _: print("Special commands: /reset, /exit, /help and /clear"),
-}
-
-VersionInfo = namedtuple('VersionInfo', ['major', 'minor', 'micro'])
-VERSION_INFO = VersionInfo(1, 0, 2)
-VERSION = '.'.join(map(str, VERSION_INFO)) # convert to string form, like: '1.2.3'
-
-CLI_START_MESSAGE = f"""
-
- ██████ ██████ ████████ ██ ██ █████ ██ ██
-██ ██ ██ ██ ██ ██ ██ ██ ██ ██
-██ ███ ██████ ██ ███████ ███████ ██ ██
-██ ██ ██ ██ ██ ██ ██ ██ ██
- ██████ ██ ██ ██ ██ ██ ███████ ███████
-
-
-Welcome to the GPT4All CLI! Version {VERSION}
-Type /help for special commands.
-
-"""
-
-# create typer app
-app = typer.Typer()
-
-@app.command()
-def repl(
- model: Annotated[
- str,
- typer.Option("--model", "-m", help="Model to use for chatbot"),
- ] = "mistral-7b-instruct-v0.1.Q4_0.gguf",
- n_threads: Annotated[
- int,
- typer.Option("--n-threads", "-t", help="Number of threads to use for chatbot"),
- ] = None,
- device: Annotated[
- str,
- typer.Option("--device", "-d", help="Device to use for chatbot, e.g. gpu, amd, nvidia, intel. Defaults to CPU."),
- ] = None,
-):
- """The CLI read-eval-print loop."""
- gpt4all_instance = GPT4All(model, device=device)
-
- # if threads are passed, set them
- if n_threads is not None:
- num_threads = gpt4all_instance.model.thread_count()
- print(f"\nAdjusted: {num_threads} →", end="")
-
- # set number of threads
- gpt4all_instance.model.set_thread_count(n_threads)
-
- num_threads = gpt4all_instance.model.thread_count()
- print(f" {num_threads} threads", end="", flush=True)
- else:
- print(f"\nUsing {gpt4all_instance.model.thread_count()} threads", end="")
-
- print(CLI_START_MESSAGE)
-
- use_new_loop = False
- try:
- version = importlib.metadata.version('gpt4all')
- version_major = int(version.split('.')[0])
- if version_major >= 1:
- use_new_loop = True
- except:
- pass # fall back to old loop
- if use_new_loop:
- _new_loop(gpt4all_instance)
- else:
- _old_loop(gpt4all_instance)
-
-
-def _old_loop(gpt4all_instance):
- while True:
- message = input(" ⇢ ")
-
- # Check if special command and take action
- if message in SPECIAL_COMMANDS:
- SPECIAL_COMMANDS[message](MESSAGES)
- continue
-
- # if regular message, append to messages
- MESSAGES.append({"role": "user", "content": message})
-
- # execute chat completion and ignore the full response since
- # we are outputting it incrementally
- full_response = gpt4all_instance.chat_completion(
- MESSAGES,
- # preferential kwargs for chat ux
- n_past=0,
- n_predict=200,
- top_k=40,
- top_p=0.9,
- min_p=0.0,
- temp=0.9,
- n_batch=9,
- repeat_penalty=1.1,
- repeat_last_n=64,
- context_erase=0.0,
- # required kwargs for cli ux (incremental response)
- verbose=False,
- streaming=True,
- )
- # record assistant's response to messages
- MESSAGES.append(full_response.get("choices")[0].get("message"))
- print() # newline before next prompt
-
-
-def _new_loop(gpt4all_instance):
- with gpt4all_instance.chat_session():
- while True:
- message = input(" ⇢ ")
-
- # Check if special command and take action
- if message in SPECIAL_COMMANDS:
- SPECIAL_COMMANDS[message](MESSAGES)
- continue
-
- # if regular message, append to messages
- MESSAGES.append({"role": "user", "content": message})
-
- # execute chat completion and ignore the full response since
- # we are outputting it incrementally
- response_generator = gpt4all_instance.generate(
- message,
- # preferential kwargs for chat ux
- max_tokens=200,
- temp=0.9,
- top_k=40,
- top_p=0.9,
- min_p=0.0,
- repeat_penalty=1.1,
- repeat_last_n=64,
- n_batch=9,
- # required kwargs for cli ux (incremental response)
- streaming=True,
- )
- response = io.StringIO()
- for token in response_generator:
- print(token, end='', flush=True)
- response.write(token)
-
- # record assistant's response to messages
- response_message = {'role': 'assistant', 'content': response.getvalue()}
- response.close()
- gpt4all_instance.current_chat_session.append(response_message)
- MESSAGES.append(response_message)
- print() # newline before next prompt
-
-
-@app.command()
-def version():
- """The CLI version command."""
- print(f"gpt4all-cli v{VERSION}")
-
-
-if __name__ == "__main__":
- app()
diff --git a/gpt4all-bindings/cli/developer_notes.md b/gpt4all-bindings/cli/developer_notes.md
deleted file mode 100644
index 0c1b75d5..00000000
--- a/gpt4all-bindings/cli/developer_notes.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Developing the CLI
-## Documentation
-Documentation can be found in three places:
-- `app.py` docstrings & comments
-- a Readme: `gpt4all-bindings/cli/README.md`
-- the actual CLI documentation: `gpt4all-bindings/python/docs/gpt4all_cli.md`
-
-The _docstrings_ are meant for programmatic use. Since the CLI is primarily geared towards users and
-not meant as a base to build on, they're kept terse.
-
-The _Readme_ is mostly meant for users and includes:
-- a link to the _CLI documentation_ (on the [website])
-- a Quickstart section with some guidance on how to get started with a sane setup
-
-The _CLI documentation_ and other documentation are located in the above mentioned `docs/` folder.
-They're in Markdown format and built for the [website]. Of the three, they should be the most
-detailed.
-
-[website]: https://docs.gpt4all.io/gpt4all_cli.html
-
-
-## Versioning
-The version number should now follow the `gpt4all` PyPI package, so compatibility is more clear.
-
-The one place to change it is the `namedtuple` called `VERSION_INFO`.
diff --git a/gpt4all-bindings/python/.gitignore b/gpt4all-bindings/python/.gitignore
deleted file mode 100644
index 970db3ec..00000000
--- a/gpt4all-bindings/python/.gitignore
+++ /dev/null
@@ -1,164 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-# in version control.
-# https://pdm.fming.dev/#use-with-ide
-.pdm.toml
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
-
-# Cython
-/*.c
-*DO_NOT_MODIFY/
\ No newline at end of file
diff --git a/gpt4all-bindings/python/.isort.cfg b/gpt4all-bindings/python/.isort.cfg
deleted file mode 100644
index 485c85a7..00000000
--- a/gpt4all-bindings/python/.isort.cfg
+++ /dev/null
@@ -1,7 +0,0 @@
-[settings]
-known_third_party=geopy,nltk,np,numpy,pandas,pysbd,fire,torch
-
-line_length=120
-include_trailing_comma=True
-multi_line_output=3
-use_parentheses=True
\ No newline at end of file
diff --git a/gpt4all-bindings/python/CHANGELOG.md b/gpt4all-bindings/python/CHANGELOG.md
deleted file mode 100644
index 1007a6ac..00000000
--- a/gpt4all-bindings/python/CHANGELOG.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
-
-## [Unreleased]
-
-### Added
-- Warn on Windows if the Microsoft Visual C++ runtime libraries are not found ([#2920](https://github.com/nomic-ai/gpt4all/pull/2920))
-- Basic cache for faster prefill when the input shares a prefix with previous context ([#3073](https://github.com/nomic-ai/gpt4all/pull/3073))
-- Add ability to modify or replace the history of an active chat session ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))
-
-### Changed
-- Rebase llama.cpp on latest upstream as of September 26th ([#2998](https://github.com/nomic-ai/gpt4all/pull/2998))
-- Change the error message when a message is too long ([#3004](https://github.com/nomic-ai/gpt4all/pull/3004))
-- Fix CalledProcessError on Intel Macs since v2.8.0 ([#3045](https://github.com/nomic-ai/gpt4all/pull/3045))
-- Use Jinja for chat templates instead of per-message QString.arg-style templates ([#3147](https://github.com/nomic-ai/gpt4all/pull/3147))
-
-## [2.8.2] - 2024-08-14
-
-### Fixed
-- Fixed incompatibility with Python 3.8 since v2.7.0 and Python <=3.11 since v2.8.1 ([#2871](https://github.com/nomic-ai/gpt4all/pull/2871))
-
-## [2.8.1] - 2024-08-13
-
-### Added
-- Use greedy sampling when temperature is set to zero ([#2854](https://github.com/nomic-ai/gpt4all/pull/2854))
-
-### Changed
-- Search for pip-installed CUDA 11 as well as CUDA 12 ([#2802](https://github.com/nomic-ai/gpt4all/pull/2802))
-- Stop shipping CUBINs to reduce wheel size ([#2802](https://github.com/nomic-ai/gpt4all/pull/2802))
-- Use llama\_kv\_cache ops to shift context faster ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))
-- Don't stop generating at end of context ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))
-
-### Fixed
-- Make reverse prompt detection work more reliably and prevent it from breaking output ([#2781](https://github.com/nomic-ai/gpt4all/pull/2781))
-- Explicitly target macOS 12.6 in CI to fix Metal compatibility on older macOS ([#2849](https://github.com/nomic-ai/gpt4all/pull/2849))
-- Do not initialize Vulkan driver when only using CPU ([#2843](https://github.com/nomic-ai/gpt4all/pull/2843))
-- Fix a segfault on exit when using CPU mode on Linux with NVIDIA and EGL ([#2843](https://github.com/nomic-ai/gpt4all/pull/2843))
-
-## [2.8.0] - 2024-08-05
-
-### Added
-- Support GPT-NeoX, Gemma 2, OpenELM, ChatGLM, and Jais architectures (all with Vulkan support) ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
-- Enable Vulkan support for StarCoder2, XVERSE, Command R, and OLMo ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
-- Support DeepSeek-V2 architecture (no Vulkan support) ([#2702](https://github.com/nomic-ai/gpt4all/pull/2702))
-- Add Llama 3.1 8B Instruct to models3.json (by [@3Simplex](https://github.com/3Simplex) in [#2731](https://github.com/nomic-ai/gpt4all/pull/2731) and [#2732](https://github.com/nomic-ai/gpt4all/pull/2732))
-- Support Llama 3.1 RoPE scaling ([#2758](https://github.com/nomic-ai/gpt4all/pull/2758))
-- Add Qwen2-1.5B-Instruct to models3.json (by [@ThiloteE](https://github.com/ThiloteE) in [#2759](https://github.com/nomic-ai/gpt4all/pull/2759))
-- Detect use of a Python interpreter under Rosetta for a clearer error message ([#2793](https://github.com/nomic-ai/gpt4all/pull/2793))
-
-### Changed
-- Build against CUDA 11.8 instead of CUDA 12 for better compatibility with older drivers ([#2639](https://github.com/nomic-ai/gpt4all/pull/2639))
-- Update llama.cpp to commit 87e397d00 from July 19th ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
-
-### Removed
-- Remove unused internal llmodel\_has\_gpu\_device ([#2409](https://github.com/nomic-ai/gpt4all/pull/2409))
-- Remove support for GPT-J models ([#2676](https://github.com/nomic-ai/gpt4all/pull/2676), [#2693](https://github.com/nomic-ai/gpt4all/pull/2693))
-
-### Fixed
-- Fix debug mode crash on Windows and undefined behavior in LLamaModel::embedInternal ([#2467](https://github.com/nomic-ai/gpt4all/pull/2467))
-- Fix CUDA PTX errors with some GPT4All builds ([#2421](https://github.com/nomic-ai/gpt4all/pull/2421))
-- Fix mishandling of inputs greater than n\_ctx tokens after [#1970](https://github.com/nomic-ai/gpt4all/pull/1970) ([#2498](https://github.com/nomic-ai/gpt4all/pull/2498))
-- Fix crash when Kompute falls back to CPU ([#2640](https://github.com/nomic-ai/gpt4all/pull/2640))
-- Fix several Kompute resource management issues ([#2694](https://github.com/nomic-ai/gpt4all/pull/2694))
-- Fix crash/hang when some models stop generating, by showing special tokens ([#2701](https://github.com/nomic-ai/gpt4all/pull/2701))
-- Fix several backend issues ([#2778](https://github.com/nomic-ai/gpt4all/pull/2778))
- - Restore leading space removal logic that was incorrectly removed in [#2694](https://github.com/nomic-ai/gpt4all/pull/2694)
- - CUDA: Cherry-pick llama.cpp DMMV cols requirement fix that caused a crash with long conversations since [#2694](https://github.com/nomic-ai/gpt4all/pull/2694)
-
-[Unreleased]: https://github.com/nomic-ai/gpt4all/compare/python-v2.8.2...HEAD
-[2.8.2]: https://github.com/nomic-ai/gpt4all/compare/python-v2.8.1...python-v2.8.2
-[2.8.1]: https://github.com/nomic-ai/gpt4all/compare/python-v2.8.0...python-v2.8.1
-[2.8.0]: https://github.com/nomic-ai/gpt4all/compare/python-v2.7.0...python-v2.8.0
diff --git a/gpt4all-bindings/python/LICENSE.txt b/gpt4all-bindings/python/LICENSE.txt
deleted file mode 100644
index ac07e380..00000000
--- a/gpt4all-bindings/python/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2023 Nomic, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/gpt4all-bindings/python/MANIFEST.in b/gpt4all-bindings/python/MANIFEST.in
deleted file mode 100644
index ffee2b3e..00000000
--- a/gpt4all-bindings/python/MANIFEST.in
+++ /dev/null
@@ -1 +0,0 @@
-recursive-include gpt4all/llmodel_DO_NOT_MODIFY *
\ No newline at end of file
diff --git a/gpt4all-bindings/python/README.md b/gpt4all-bindings/python/README.md
deleted file mode 100644
index e2a4037a..00000000
--- a/gpt4all-bindings/python/README.md
+++ /dev/null
@@ -1,93 +0,0 @@
-# Python GPT4All
-
-This package contains a set of Python bindings around the `llmodel` C-API.
-
-Package on PyPI: https://pypi.org/project/gpt4all/
-
-## Documentation
-https://docs.gpt4all.io/gpt4all_python.html
-
-## Installation
-
-The easiest way to install the Python bindings for GPT4All is to use pip:
-
-```
-pip install gpt4all
-```
-
-This will download the latest version of the `gpt4all` package from PyPI.
-
-## Local Build
-
-As an alternative to downloading via pip, you may build the Python bindings from source.
-
-### Prerequisites
-
-You will need a compiler. On Windows, you should install Visual Studio with the C++ Development components. On macOS, you will need the full version of Xcode; the Xcode Command Line Tools alone lack certain required tools. On Linux, you will need a GCC or Clang toolchain with C++ support.
-
-On Windows and Linux, building GPT4All with full GPU support requires the [Vulkan SDK](https://vulkan.lunarg.com/sdk/home) and the latest [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).
-
-### Building the Python bindings
-
-1. Clone GPT4All and change directory:
-```
-git clone --recurse-submodules https://github.com/nomic-ai/gpt4all.git
-cd gpt4all/gpt4all-backend
-```
-
-2. Build the backend.
-
-If you are using Windows and have Visual Studio installed:
-```
-cmake -B build
-cmake --build build --parallel --config RelWithDebInfo
-```
-
-For all other platforms:
-```
-cmake -B build -DCMAKE_BUILD_TYPE=RelWithDebInfo
-cmake --build build --parallel
-```
-
-`RelWithDebInfo` is a good default, but you can also use `Release` or `Debug` depending on the situation.
-
-3. Install the Python package:
-```
-cd ../gpt4all-bindings/python
-pip install -e .
-```
-
-## Usage
-
-Test it out! In a Python script or console:
-
-```python
-from gpt4all import GPT4All
-model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
-output = model.generate("The capital of France is ", max_tokens=3)
-print(output)
-```
-
-
-GPU Usage
-```python
-from gpt4all import GPT4All
-model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf", device='gpu') # device='amd', device='intel'
-output = model.generate("The capital of France is ", max_tokens=3)
-print(output)
-```
-
-## Troubleshooting a Local Build
-- If you're on Windows and have compiled with a MinGW toolchain, you might run into an error like:
- ```
- FileNotFoundError: Could not find module '<...>\gpt4all-bindings\python\gpt4all\llmodel_DO_NOT_MODIFY\build\libllmodel.dll'
- (or one of its dependencies). Try using the full path with constructor syntax.
- ```
- The key phrase in this case is _"or one of its dependencies"_. The Python interpreter you're using
- probably doesn't see the MinGW runtime dependencies. At the moment, the following three are required:
- `libgcc_s_seh-1.dll`, `libstdc++-6.dll` and `libwinpthread-1.dll`. You should copy them from MinGW
-  into a folder where Python will see them, preferably next to `libllmodel.dll` (see also the sketch below).
-
-- Note regarding the Microsoft toolchain: Compiling with MSVC is possible, but not the official way to
- go about it at the moment. MSVC doesn't produce DLLs with a `lib` prefix, which the bindings expect.
- You'd have to amend that yourself.
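-
-As an alternative to copying the DLLs described in the MinGW note above, a sketch of a possible workaround (an untested assumption, requiring Python 3.8+ on Windows) is to add the MinGW `bin` directory to the DLL search path before importing the bindings:
-
-```python
-import os
-
-# hypothetical MinGW install path; point this at the folder containing
-# libgcc_s_seh-1.dll, libstdc++-6.dll and libwinpthread-1.dll
-os.add_dll_directory(r"C:\mingw64\bin")
-
-from gpt4all import GPT4All  # the runtime dependencies should now be found
-```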
diff --git a/gpt4all-bindings/python/docs/gpt4all_python/home.md b/gpt4all-bindings/python/docs/gpt4all_python/home.md
deleted file mode 100644
index f77f7a00..00000000
--- a/gpt4all-bindings/python/docs/gpt4all_python/home.md
+++ /dev/null
@@ -1,159 +0,0 @@
-# GPT4All Python SDK
-
-## Installation
-
-To get started, pip-install the `gpt4all` package into your Python environment.
-
-```bash
-pip install gpt4all
-```
-
-We recommend installing `gpt4all` into its own virtual environment using `venv` or `conda`.
-
-## Load LLM
-
-Models are loaded by name via the `GPT4All` class. If it's your first time loading a model, it will be downloaded to your device and saved so it can be quickly reloaded next time you create a `GPT4All` model with the same name.
-
-!!! note "Load LLM"
-
- ```python
- from gpt4all import GPT4All
- model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf") # downloads / loads a 4.66GB LLM
- with model.chat_session():
- print(model.generate("How can I run LLMs efficiently on my laptop?", max_tokens=1024))
- ```
-
-| `GPT4All` model name| Filesize| RAM Required| Parameters| Quantization| Developer| License| MD5 Sum (Unique Hash)|
-|------|---------|-------|-------|-----------|----------|--------|----------------------|
-| `Meta-Llama-3-8B-Instruct.Q4_0.gguf`| 4.66 GB| 8 GB| 8 Billion| q4_0| Meta| [Llama 3 License](https://llama.meta.com/llama3/license/)| c87ad09e1e4c8f9c35a5fcef52b6f1c9|
-| `Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf`| 4.11 GB| 8 GB| 7 Billion| q4_0| Mistral & Nous Research | [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)| a5f6b4eabd3992da4d7fb7f020f921eb|
-| `Phi-3-mini-4k-instruct.Q4_0.gguf` | 2.18 GB| 4 GB| 3.8 billion| q4_0| Microsoft| [MIT](https://opensource.org/license/mit)| f8347badde9bfc2efbe89124d78ddaf5|
-| `orca-mini-3b-gguf2-q4_0.gguf`| 1.98 GB| 4 GB| 3 billion| q4_0| Microsoft | [CC-BY-NC-SA-4.0](https://spdx.org/licenses/CC-BY-NC-SA-4.0)| 0e769317b90ac30d6e09486d61fefa26|
-| `gpt4all-13b-snoozy-q4_0.gguf`| 7.37 GB| 16 GB| 13 billion| q4_0| Nomic AI| [GPL](https://www.gnu.org/licenses/gpl-3.0.en.html)| 40388eb2f8d16bb5d08c96fdfaac6b2c|
-
-
-## Chat Session Generation
-
-Most of the language models you will be able to access from HuggingFace have been trained as assistants. This guides language models to answer not just with relevant text, but with *helpful* text.
-
-If you want your LLM's responses to be helpful in the typical sense, we recommend you apply the chat templates the models were finetuned with. Information about specific prompt templates is typically available on the official HuggingFace page for the model.
-
-!!! note "Example LLM Chat Session Generation"
-
- === "Code"
-
- Load `Llama 3` and enter the following prompt in a chat session:
-
- ```python
- from gpt4all import GPT4All
- model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf")
- with model.chat_session():
- print(model.generate("quadratic formula"))
- ```
-
- === "Output"
-
- With the default sampling settings, you should see something resembling the following:
- ```
- The quadratic formula!
-
- The quadratic formula is a mathematical formula that provides the solutions to a quadratic equation of the form:
-
- ax^2 + bx + c = 0
-
- where a, b, and c are constants. The formula is:
-
- x = (-b ± √(b^2 - 4ac)) / 2a
-
- Let's break it down:
-
- * x is the variable we're trying to solve for.
- * a, b, and c are the coefficients of the quadratic equation.
- * ± means "plus or minus".
- * √ denotes the square root.
-
- To use the formula, simply plug in the values of a, b, and c into the expression above. The resulting value(s) will be the solutions to the original quadratic equation!
-
- For example, let's say we have the quadratic equation:
-
- x^2 + 5x + 6 = 0
-
- We can plug these values into the formula as follows:
-
- a = 1
- b = 5
- ```
-
-## Direct Generation
-
-Directly calling `model.generate()` prompts the model without applying any templates.
-
-Note: this can result in responses that mirror the tone of your prompt rather than helpful answers. In general, a language model outside of a chat session is less of a helpful assistant and more of a lens into the distribution of the model's training data.
-
-As an example, see how the model's response changes when we give the same prompt as above without applying a chat session:
-
-!!! note "Example LLM Direct Generation"
-
- === "Code"
-
- Load `Llama 3` and enter the following prompt:
-
- ```python
- from gpt4all import GPT4All
- model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf")
- print(model.generate("quadratic formula"))
- ```
- === "Output"
-
- With the default sampling settings, you should see something resembling the following:
- ```
- . The equation is in the form of a + bx = c, where a and b are constants.
- The solution to this problem involves using the quadratic formula which states that for any quadratic equation ax^2+bx+c=0, its solutions can be found by:
- x = (-b ± √(b^2-4ac)) / 2a
- In your case, since you have a + bx = c, we need to rewrite it in the form of ax^2+bx+c=0. To do this, subtract both sides from c, so that:
- c - (a + bx) = 0
- Now, combine like terms on the left side and simplify:
- ax^2 + (-b)x + (c-a) = 0\n\nSo now we have a quadratic equation in standard form: ax^2+bx+c=0. We can use this to find its solutions using the quadratic formula:
-
- x = ((-b ± √((-b)^2
- ```
-
-Why did it respond differently? Because language models, before being fine-tuned as assistants, are trained to be more like a data mimic than a helpful assistant. Therefore the response ends up more like a typical continuation of math-style text than a helpful answer in a dialog.
-
-## Embeddings
-
-Nomic trains and open-sources free embedding models that will run very fast on your hardware.
-
-The easiest way to run the text embedding model locally uses the [`nomic`](https://github.com/nomic-ai/nomic) python library to interface with our fast [C/C++ implementations](ref.md#gpt4all.gpt4all.Embed4All).
-
-!!! note "Example Embeddings Generation"
-
- === "Code"
-
- Importing `embed` from the [`nomic`](https://github.com/nomic-ai/nomic) library, you can call `embed.text()` with `inference_mode="local"`. This downloads an embedding model and saves it for later.
-
- ```python
- from nomic import embed
- embeddings = embed.text(["String 1", "String 2"], inference_mode="local")['embeddings']
- print("Number of embeddings created:", len(embeddings))
- print("Number of dimensions per embedding:", len(embeddings[0]))
- ```
-
- === "Output"
-
- ```
- Number of embeddings created: 2
- Number of dimensions per embedding: 768
- ```
-
-
-
-To learn more about making embeddings locally with `nomic`, visit our [embeddings guide](https://docs.nomic.ai/atlas/guides/embeddings#local-inference).
-
-The following embedding models can be used within the application and with the `Embed4All` class from the `gpt4all` Python library. The default context length of the GGUF files is 2048 but can be [extended](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5-GGUF#description).
-
-| Name| Using with `nomic`| `Embed4All` model name| Context Length| # Embedding Dimensions| File Size|
-|--------------------|-|------------------------------------------------------|---------------:|-----------------:|----------:|
-| [Nomic Embed v1](https://huggingface.co/nomic-ai/nomic-embed-text-v1-GGUF) | ```embed.text(strings, model="nomic-embed-text-v1", inference_mode="local")```| ```Embed4All("nomic-embed-text-v1.f16.gguf")```| 2048 | 768 | 262 MiB |
-| [Nomic Embed v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5-GGUF) | ```embed.text(strings, model="nomic-embed-text-v1.5", inference_mode="local")```| ```Embed4All("nomic-embed-text-v1.5.f16.gguf")``` | 2048| 64-768 | 262 MiB |
-| [SBert](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)| n/a| ```Embed4All("all-MiniLM-L6-v2.gguf2.f16.gguf")```| 512 | 384 | 44 MiB |
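-
-As a quick check that local embeddings work, here is a minimal sketch using `Embed4All` with one of the model names from the table above (the file is downloaded on first use if not already present):
-
-```python
-from gpt4all import Embed4All
-
-embedder = Embed4All("nomic-embed-text-v1.5.f16.gguf")
-vector = embedder.embed("The quick brown fox jumps over the lazy dog")
-print(len(vector))  # number of embedding dimensions
-```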
diff --git a/gpt4all-bindings/python/docs/gpt4all_python/monitoring.md b/gpt4all-bindings/python/docs/gpt4all_python/monitoring.md
deleted file mode 100644
index 43809f8b..00000000
--- a/gpt4all-bindings/python/docs/gpt4all_python/monitoring.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# GPT4All Monitoring
-
-GPT4All integrates with [OpenLIT](https://github.com/openlit/openlit) OpenTelemetry auto-instrumentation to perform real-time monitoring of your LLM application and GPU hardware.
-
-Monitoring can enhance your GPT4All deployment with auto-generated traces and metrics for
-
-- **Performance Optimization:** Analyze latency, cost and token usage to ensure your LLM application runs efficiently, identifying and resolving performance bottlenecks swiftly.
-
-- **User Interaction Insights:** Capture each prompt and response to understand user behavior and usage patterns better, improving user experience and engagement.
-
-- **Detailed GPU Metrics:** Monitor essential GPU parameters such as utilization, memory consumption, temperature, and power usage to maintain optimal hardware performance and avert potential issues.
-
-## Setup Monitoring
-
-!!! note "Setup Monitoring"
-
- With [OpenLIT](https://github.com/openlit/openlit), you can automatically monitor traces and metrics for your LLM deployment:
-
- ```shell
- pip install openlit
- ```
-
- ```python
- from gpt4all import GPT4All
- import openlit
-
- openlit.init() # start
- # openlit.init(collect_gpu_stats=True) # Optional: To configure GPU monitoring
-
- model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
-
- # Start a chat session and send queries
- with model.chat_session():
- response1 = model.generate(prompt='hello', temp=0)
- response2 = model.generate(prompt='write me a short poem', temp=0)
- response3 = model.generate(prompt='thank you', temp=0)
-
- print(model.current_chat_session)
- ```
-
-## Visualization
-
-### OpenLIT UI
-
-Connect to OpenLIT's UI to start exploring the collected LLM performance metrics and traces. Visit the OpenLIT [Quickstart Guide](https://docs.openlit.io/latest/quickstart) for step-by-step details.
-
-### Grafana, DataDog, & Other Integrations
-
-You can also send the data collected by OpenLIT to popular monitoring tools like Grafana and DataDog. For detailed instructions on setting up these connections, please refer to the OpenLIT [Connections Guide](https://docs.openlit.io/latest/connections/intro).
diff --git a/gpt4all-bindings/python/docs/gpt4all_python/ref.md b/gpt4all-bindings/python/docs/gpt4all_python/ref.md
deleted file mode 100644
index 22a597b1..00000000
--- a/gpt4all-bindings/python/docs/gpt4all_python/ref.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# GPT4All Python SDK Reference
-::: gpt4all.gpt4all.GPT4All
-
-::: gpt4all.gpt4all.Embed4All
\ No newline at end of file
diff --git a/gpt4all-bindings/python/docs/old/gpt4all_cli.md b/gpt4all-bindings/python/docs/old/gpt4all_cli.md
deleted file mode 100644
index 1f4989a0..00000000
--- a/gpt4all-bindings/python/docs/old/gpt4all_cli.md
+++ /dev/null
@@ -1,198 +0,0 @@
-# GPT4All CLI
-
-The GPT4All command-line interface (CLI) is a Python script which is built on top of the
-[Python bindings][docs-bindings-python] ([repository][repo-bindings-python]) and the [typer]
-package. The source code, README, and local build instructions can be found
-[here][repo-bindings-cli].
-
-[docs-bindings-python]: gpt4all_python.md
-[repo-bindings-python]: https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python
-[repo-bindings-cli]: https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/cli
-[typer]: https://typer.tiangolo.com/
-
-## Installation
-### The Short Version
-
-The CLI is a Python script called [app.py]. If you're already familiar with Python best practices,
-the short version is to [download app.py][app.py-download] into a folder of your choice, install
-the two required dependencies with some variant of:
-```shell
-pip install gpt4all typer
-```
-
-Then run it with a variant of:
-```shell
-python app.py repl
-```
-In case you're wondering, _REPL_ is an acronym for [read-eval-print loop][wiki-repl].
-
-[app.py]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-bindings/cli/app.py
-[app.py-download]: https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-bindings/cli/app.py
-[wiki-repl]: https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop
-
-### Recommendations & The Long Version
-
-Especially if you have several applications/libraries which depend on Python, to avoid descending
-into dependency hell at some point, you should:
-- Consider always installing into some kind of [_virtual environment_][venv].
-- On a _Unix-like_ system, don't use `sudo` for anything other than packages provided by the system
- package manager, i.e. never with `pip`.
-
-[venv]: https://docs.python.org/3/library/venv.html
-
-There are several ways and tools available to do this, so below are descriptions on how to install
-with a _virtual environment_ (recommended) or a user installation on all three main platforms.
-
-Different platforms can have slightly different ways to start the Python interpreter itself.
-
-Note: _Typer_ has an optional dependency for more fanciful output. If you want that, replace `typer`
-with `typer[all]` in the pip-install instructions below.
-
-#### Virtual Environment Installation
-You can name your _virtual environment_ folder for the CLI whatever you like. In the following,
-`gpt4all-cli` is used throughout.
-
-##### macOS
-
-There are at least three ways to have a Python installation on _macOS_, and possibly not all of them
-provide a full installation of Python and its tools. When in doubt, try the following:
-```shell
-python3 -m venv --help
-python3 -m pip --help
-```
-Both should print the help for the `venv` and `pip` commands, respectively. If they don't, consult
-the documentation of your Python installation on how to enable them, or download a separate Python
-variant, for example try a [unified installer package from python.org][python.org-downloads].
-
-[python.org-downloads]: https://www.python.org/downloads/
-
-Once ready, do:
-```shell
-python3 -m venv gpt4all-cli
-. gpt4all-cli/bin/activate
-python3 -m pip install gpt4all typer
-```
-
-##### Windows
-
-Download the [official installer from python.org][python.org-downloads] if Python isn't already
-present on your system.
-
-A _Windows_ installation should already provide all the components for a _virtual environment_. Run:
-```shell
-py -3 -m venv gpt4all-cli
-gpt4all-cli\Scripts\activate
-py -m pip install gpt4all typer
-```
-
-##### Linux
-
-On Linux, a Python installation is often split into several packages and not all are necessarily
-installed by default. For example, on Debian/Ubuntu and derived distros, you will want to ensure
-their presence with the following:
-```shell
-sudo apt-get install python3-venv python3-pip
-```
-The next steps are similar to the other platforms:
-```shell
-python3 -m venv gpt4all-cli
-. gpt4all-cli/bin/activate
-python3 -m pip install gpt4all typer
-```
-On other distros, the situation might be different. Especially the package names can vary a lot.
-You'll have to look it up in the documentation, software directory, or package search.
-
-#### User Installation
-##### macOS
-
-There are at least three ways to have a Python installation on _macOS_, and possibly not all of them
-provide a full installation of Python and its tools. When in doubt, try the following:
-```shell
-python3 -m pip --help
-```
-That should print the help for the `pip` command. If it doesn't, consult the documentation of your
-Python installation on how to enable it, or download a separate Python variant, for example try a
-[unified installer package from python.org][python.org-downloads].
-
-Once ready, do:
-```shell
-python3 -m pip install --user --upgrade gpt4all typer
-```
-
-##### Windows
-
-Download the [official installer from python.org][python.org-downloads] if Python isn't already
-present on your system. It includes all the necessary components. Run:
-```shell
-py -3 -m pip install --user --upgrade gpt4all typer
-```
-
-##### Linux
-
-On Linux, a Python installation is often split into several packages and not all are necessarily
-installed by default. For example, on Debian/Ubuntu and derived distros, you will want to ensure
-their presence with the following:
-```shell
-sudo apt-get install python3-pip
-```
-The next steps are similar to the other platforms:
-```shell
-python3 -m pip install --user --upgrade gpt4all typer
-```
-On other distros, the situation might be different. Especially the package names can vary a lot.
-You'll have to look it up in the documentation, software directory, or package search.
-
-## Running the CLI
-
-The CLI is a self-contained script called [app.py]. As such, you can [download][app.py-download]
-and save it anywhere you like, as long as the Python interpreter has access to the mentioned
-dependencies.
-
-Note: different platforms can have slightly different ways to start Python. Whereas below the
-interpreter command is written as `python` you typically want to type instead:
-- On _Unix-like_ systems: `python3`
-- On _Windows_: `py -3`
-
-The simplest way to start the CLI is:
-```shell
-python app.py repl
-```
-This automatically selects the [groovy] model and downloads it into the `.cache/gpt4all/` folder
-of your home directory, if not already present.
-
-[groovy]: https://huggingface.co/nomic-ai/gpt4all-j#model-details
-
-If you want to use a different model, you can do so with the `-m`/`--model` parameter. If only a
-model file name is provided, it will again check in `.cache/gpt4all/` and might start downloading.
-If instead given a path to an existing model, the command could for example look like this:
-```shell
-python app.py repl --model /home/user/my-gpt4all-models/gpt4all-13b-snoozy-q4_0.gguf
-```
-
-When you're done and want to end a session, simply type `/exit`.
-
-To get help and information on all the available commands and options on the command-line, run:
-```shell
-python app.py --help
-```
-And while inside the running _REPL_, write `/help`.
-
-Note that if you've installed the required packages into a _virtual environment_, you don't need
-to activate that every time you want to run the CLI. Instead, you can just start it with the Python
-interpreter in the folder `gpt4all-cli/bin/` (_Unix-like_) or `gpt4all-cli/Scripts/` (_Windows_).
-
-That also makes it easy to set an alias e.g. in [Bash][bash-aliases] or [PowerShell][posh-aliases]:
-- Bash: `alias gpt4all="'/full/path/to/gpt4all-cli/bin/python' '/full/path/to/app.py' repl"`
-- PowerShell:
- ```posh
- Function GPT4All-Venv-CLI {"C:\full\path\to\gpt4all-cli\Scripts\python.exe" "C:\full\path\to\app.py" repl}
- Set-Alias -Name gpt4all -Value GPT4All-Venv-CLI
- ```
-
-Don't forget to save these in the start-up file of your shell.
-
-[bash-aliases]: https://www.gnu.org/software/bash/manual/html_node/Aliases.html
-[posh-aliases]: https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/set-alias
-
-Finally, if on _Windows_ you see a box instead of an arrow `⇢` as the prompt character, you should
-change the console font to one which offers better Unicode support.
diff --git a/gpt4all-bindings/python/docs/old/gpt4all_faq.md b/gpt4all-bindings/python/docs/old/gpt4all_faq.md
deleted file mode 100644
index 74a95772..00000000
--- a/gpt4all-bindings/python/docs/old/gpt4all_faq.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# GPT4All FAQ
-
-## What models are supported by the GPT4All ecosystem?
-
-Currently, there are six different model architectures that are supported:
-
-1. GPT-J - Based off of the GPT-J architecture with examples found [here](https://huggingface.co/EleutherAI/gpt-j-6b)
-2. LLaMA - Based off of the LLaMA architecture with examples found [here](https://huggingface.co/models?sort=downloads&search=llama)
-3. MPT - Based off of Mosaic ML's MPT architecture with examples found [here](https://huggingface.co/mosaicml/mpt-7b)
-4. Replit - Based off of Replit Inc.'s Replit architecture with examples found [here](https://huggingface.co/replit/replit-code-v1-3b)
-5. Falcon - Based off of TII's Falcon architecture with examples found [here](https://huggingface.co/tiiuae/falcon-40b)
-6. StarCoder - Based off of BigCode's StarCoder architecture with examples found [here](https://huggingface.co/bigcode/starcoder)
-
-## Why so many different architectures? What differentiates them?
-
-One of the major differences is license. Currently, the LLaMA based models are subject to a non-commercial license, whereas the GPTJ and MPT base
-models allow commercial usage. However, LLaMA's successor [Llama 2 is commercially licensable](https://ai.meta.com/llama/license/), too. Early in
-the recent explosion of activity in open source local models, the LLaMA models were generally seen as performing better, but that is
-changing quickly. Every week - even every day! - new models are released with some of the GPTJ and MPT models competitive in performance/quality with
-LLaMA. What's more, there are some very nice architectural innovations with the MPT models that could lead to new performance/quality gains.
-
-## How does GPT4All make these models available for CPU inference?
-
-By leveraging the ggml library written by Georgi Gerganov and a growing community of developers. There are currently multiple different versions of
-this library. The original GitHub repo can be found [here](https://github.com/ggerganov/ggml), but the developer of the library has also created a
-LLaMA based version [here](https://github.com/ggerganov/llama.cpp). Currently, this backend is using the latter as a submodule.
-
-## Does that mean GPT4All is compatible with all llama.cpp models and vice versa?
-
-Yes!
-
-The upstream [llama.cpp](https://github.com/ggerganov/llama.cpp) project has introduced several [compatibility breaking] quantization methods recently.
-These changes render all previous models (including the ones that GPT4All uses) inoperative with newer versions of llama.cpp.
-
-Fortunately, we have engineered a submoduling system allowing us to dynamically load different versions of the underlying library so that
-GPT4All just works.
-
-[compatibility breaking]: https://github.com/ggerganov/llama.cpp/commit/b9fd7eee57df101d4a3e3eabc9fd6c2cb13c9ca1
-
-## What are the system requirements?
-
-Your CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) and you need enough RAM to load a model into memory.
-
-## What about GPU inference?
-
-In newer versions of llama.cpp, support has been added for NVIDIA GPU inference. We're investigating how to incorporate this into our downloadable installers.
-
-## Ok, so bottom line... how do I make my model on Hugging Face compatible with the GPT4All ecosystem right now?
-
-1. Check to make sure the Hugging Face model is available in one of our supported architectures
-2. If it is, then you can use the conversion script inside of our pinned llama.cpp submodule for GPTJ and LLaMA based models
-3. Or if your model is an MPT model you can use the conversion script located directly in this backend directory under the scripts subdirectory
-
-## Language Bindings
-
-#### There's a problem with the download
-
-Some bindings can download a model, if allowed to do so. For example, in Python or TypeScript if `allow_download=True`
-or `allowDownload=true` (default), a model is automatically downloaded into `.cache/gpt4all/` in the user's home folder,
-unless it already exists.
-
-In case of connection issues or errors during the download, you might want to manually verify the model file's MD5
-checksum by comparing it with the one listed in [models3.json].
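-
-For example, a minimal sketch of computing the checksum with the Python standard library (the path assumes the default download location described above; adjust the file name to the model you downloaded):
-
-```python
-import hashlib
-from pathlib import Path
-
-model_path = Path.home() / ".cache" / "gpt4all" / "orca-mini-3b-gguf2-q4_0.gguf"
-
-md5 = hashlib.md5()
-with open(model_path, "rb") as f:
-    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
-        md5.update(chunk)
-print(md5.hexdigest())  # compare against the md5sum listed in models3.json
-```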
-
-As an alternative to the basic downloader built into the bindings, you can choose to download from the
- website instead. Scroll down to 'Model Explorer' and pick your preferred model.
-
-[models3.json]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-chat/metadata/models3.json
-
-#### I need the chat GUI and bindings to behave the same
-
-The chat GUI and bindings are based on the same backend. You can make them behave the same way by following these steps:
-
-- First of all, ensure that all parameters in the chat GUI settings match those passed to the generating API, e.g.:
-
- === "Python"
- ``` py
- from gpt4all import GPT4All
- model = GPT4All(...)
- model.generate("prompt text", temp=0, ...) # adjust parameters
- ```
- === "TypeScript"
- ``` ts
- import { createCompletion, loadModel } from '../src/gpt4all.js'
- const ll = await loadModel(...);
- const messages = ...
- const re = await createCompletion(ll, messages, { temp: 0, ... }); // adjust parameters
- ```
-
-- To make comparing the output easier, set _Temperature_ in both to 0 for now. This will make the output deterministic.
-
-- Next you'll have to compare the templates, adjusting them as necessary, based on how you're using the bindings.
- - Specifically, in Python:
-    - With simple `generate()` calls, the input has to be surrounded with system and prompt templates (see the sketch after this list).
- - When using a chat session, it depends on whether the bindings are allowed to download [models3.json]. If yes,
- and in the chat GUI the default templates are used, it'll be handled automatically. If no, use
- `chat_session()` template parameters to customize them.
-
-- Once you're done, remember to reset _Temperature_ to its previous value in both chat GUI and your custom code.
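-
-As a rough illustration of the point about `generate()` calls above, here is a minimal sketch of surrounding the input with templates by hand. The template strings are placeholders, not the ones any particular model was finetuned with; look up the real templates for your model:
-
-```python
-from gpt4all import GPT4All
-
-# hypothetical templates, for illustration only
-system_template = "### System:\nYou are a helpful assistant.\n\n"
-prompt_template = "### User:\n{0}\n\n### Response:\n"
-
-model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
-text = system_template + prompt_template.format("What is the capital of France?")
-print(model.generate(text, temp=0))
-```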
diff --git a/gpt4all-bindings/python/docs/old/gpt4all_monitoring.md b/gpt4all-bindings/python/docs/old/gpt4all_monitoring.md
deleted file mode 100644
index bc606dde..00000000
--- a/gpt4all-bindings/python/docs/old/gpt4all_monitoring.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Monitoring
-
-Leverage OpenTelemetry to perform real-time monitoring of your LLM application and GPUs using [OpenLIT](https://github.com/openlit/openlit). This tool helps you easily collect data on user interactions and performance metrics, along with GPU performance metrics, which can assist in enhancing the functionality and dependability of your GPT4All-based LLM application.
-
-## How does it work?
-
-OpenLIT adds automatic OTel instrumentation to the GPT4All SDK. It covers the `generate` and `embedding` functions, helping to track LLM usage by gathering inputs and outputs. This allows users to monitor and evaluate the performance and behavior of their LLM application in different environments. OpenLIT also provides OTel auto-instrumentation for monitoring GPU metrics like utilization, temperature, power usage, and memory usage.
-
-Additionally, you have the flexibility to view and analyze the generated traces and metrics either in the OpenLIT UI or by exporting them to widely used observability tools like Grafana and DataDog for more comprehensive analysis and visualization.
-
-## Getting Started
-
-Here’s a straightforward guide to help you set up and start monitoring your application:
-
-### 1. Install the OpenLIT SDK
-Open your terminal and run:
-
-```shell
-pip install openlit
-```
-
-### 2. Setup Monitoring for your Application
-In your application, initialize OpenLIT as outlined below:
-
-```python
-from gpt4all import GPT4All
-import openlit
-
-openlit.init() # Initialize OpenLIT monitoring
-
-model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
-
-# Start a chat session and send queries
-with model.chat_session():
- response1 = model.generate(prompt='hello', temp=0)
- response2 = model.generate(prompt='write me a short poem', temp=0)
- response3 = model.generate(prompt='thank you', temp=0)
-
- print(model.current_chat_session)
-```
-This setup wraps your gpt4all model interactions, capturing valuable data about each request and response.
-
-### 3. (Optional) Enable GPU Monitoring
-
-If your application runs on NVIDIA GPUs, you can enable GPU stats collection in the OpenLIT SDK by adding `collect_gpu_stats=True`. This collects GPU metrics like utilization, temperature, power usage, and memory-related performance metrics. The collected metrics are OpenTelemetry gauges.
-
-```python
-from gpt4all import GPT4All
-import openlit
-
-openlit.init(collect_gpu_stats=True) # Initialize OpenLIT monitoring
-
-model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
-
-# Start a chat session and send queries
-with model.chat_session():
- response1 = model.generate(prompt='hello', temp=0)
- response2 = model.generate(prompt='write me a short poem', temp=0)
- response3 = model.generate(prompt='thank you', temp=0)
-
- print(model.current_chat_session)
-```
-
-### Visualize
-
-Once you've set up data collection with [OpenLIT](https://github.com/openlit/openlit), you can visualize and analyze this information to better understand your application's performance:
-
-- **Using OpenLIT UI:** Connect to OpenLIT's UI to start exploring performance metrics. Visit the OpenLIT [Quickstart Guide](https://docs.openlit.io/latest/quickstart) for step-by-step details.
-
-- **Integrate with existing Observability Tools:** If you use tools like Grafana or DataDog, you can integrate the data collected by OpenLIT. For instructions on setting up these connections, check the OpenLIT [Connections Guide](https://docs.openlit.io/latest/connections/intro).
diff --git a/gpt4all-bindings/python/docs/old/gpt4all_nodejs.md b/gpt4all-bindings/python/docs/old/gpt4all_nodejs.md
deleted file mode 100644
index a282a47a..00000000
--- a/gpt4all-bindings/python/docs/old/gpt4all_nodejs.md
+++ /dev/null
@@ -1,1029 +0,0 @@
-# GPT4All Node.js API
-
-Native Node.js LLM bindings for all.
-
-```sh
-yarn add gpt4all@latest
-
-npm install gpt4all@latest
-
-pnpm install gpt4all@latest
-
-```
-
-## Contents
-
-* See [API Reference](#api-reference)
-* See [Examples](#api-example)
-* See [Developing](#develop)
-* GPT4All Node.js bindings created by [jacoobes](https://github.com/jacoobes), [limez](https://github.com/iimez) and the [nomic ai community](https://home.nomic.ai), for all to use.
-
-## API Example
-
-### Chat Completion
-
-```js
-import { LLModel, createCompletion, DEFAULT_DIRECTORY, DEFAULT_LIBRARIES_DIRECTORY, loadModel } from '../src/gpt4all.js'
-
-const model = await loadModel( 'mistral-7b-openorca.gguf2.Q4_0.gguf', { verbose: true, device: 'gpu' });
-
-const completion1 = await createCompletion(model, 'What is 1 + 1?', { verbose: true, })
-console.log(completion1.message)
-
-const completion2 = await createCompletion(model, 'And if we add two?', { verbose: true })
-console.log(completion2.message)
-
-model.dispose()
-```
-
-### Embedding
-
-```js
-import { loadModel, createEmbedding } from '../src/gpt4all.js'
-
-const embedder = await loadModel("all-MiniLM-L6-v2-f16.gguf", { verbose: true, type: 'embedding'})
-
-console.log(createEmbedding(embedder, "Maybe Minecraft was the friends we made along the way"));
-```
-
-### Chat Sessions
-
-```js
-import { loadModel, createCompletion } from "../src/gpt4all.js";
-
-const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", {
- verbose: true,
- device: "gpu",
-});
-
-const chat = await model.createChatSession();
-
-await createCompletion(
- chat,
- "Why are bananas rather blue than bread at night sometimes?",
- {
- verbose: true,
- }
-);
-await createCompletion(chat, "Are you sure?", { verbose: true, });
-
-```
-
-### Streaming responses
-
-```js
-import gpt from "../src/gpt4all.js";
-
-const model = await gpt.loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf", {
- device: "gpu",
-});
-
-process.stdout.write("### Stream:");
-const stream = gpt.createCompletionStream(model, "How are you?");
-stream.tokens.on("data", (data) => {
- process.stdout.write(data);
-});
-//wait till stream finishes. We cannot continue until this one is done.
-await stream.result;
-process.stdout.write("\n");
-
-process.stdout.write("### Stream with pipe:");
-const stream2 = gpt.createCompletionStream(
- model,
- "Please say something nice about node streams."
-);
-stream2.tokens.pipe(process.stdout);
-await stream2.result;
-process.stdout.write("\n");
-
-console.log("done");
-model.dispose();
-```
-
-### Async Generators
-
-```js
-import gpt from "../src/gpt4all.js";
-
-const model = await gpt.loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf", {
- device: "gpu",
-});
-
-process.stdout.write("### Generator:");
-const gen = gpt.createCompletionGenerator(model, "Redstone in Minecraft is Turing Complete. Let that sink in. (let it in!)");
-for await (const chunk of gen) {
- process.stdout.write(chunk);
-}
-
-process.stdout.write("\n");
-model.dispose();
-```
-
-## Develop
-
-### Build Instructions
-
-* binding.gyp is the compile config
-* Tested on Ubuntu. Everything seems to work fine
-* Tested on Windows. Everything works fine.
-* Sparse testing on macOS.
-* MinGW works as well to build the gpt4all-backend. **HOWEVER**, this package works only with MSVC-built DLLs.
-
-### Requirements
-
-* git
-* [node.js >= 18.0.0](https://nodejs.org/en)
-* [yarn](https://yarnpkg.com/)
-* [node-gyp](https://github.com/nodejs/node-gyp)
- * all of its requirements.
-* (unix) gcc version 12
-* (win) msvc version 143
- * Can be obtained with visual studio 2022 build tools
-* python 3
-* On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home
-* macOS users do not need Vulkan, as GPT4All will use Metal instead.
-
-### Build (from source)
-
-```sh
-git clone https://github.com/nomic-ai/gpt4all.git
-cd gpt4all-bindings/typescript
-```
-
-* The below shell commands assume the current working directory is `typescript`.
-
-* To Build and Rebuild:
-
-```sh
-yarn
-```
-
-* The llama.cpp git submodule for gpt4all may be absent. If this is the case, make sure to run the following in the llama.cpp parent directory:
-
-```sh
-git submodule update --init --depth 1 --recursive
-```
-
-```sh
-yarn build:backend
-```
-
-This will build platform-dependent dynamic libraries, which will be located in `runtimes/(platform)/native`. The only current way to use them is to put them in the current working directory of your application. That is, **WHEREVER YOU RUN YOUR NODE APPLICATION**.
-
-* llama-xxxx.dll is required.
-* Depending on the model you are using, you'll need to select the proper model loader.
-  * For example, if you are running a Mosaic MPT model, you will need to select the mpt-(buildvariant).(dynamiclibrary)
-
-### Test
-
-```sh
-yarn test
-```
-
-### Source Overview
-
-#### src/
-
-* Extra functions to aid developer experience (devex)
-* Typings for the native node addon
-* the javascript interface
-
-#### test/
-
-* simple unit tests for some exported functions.
-* more advanced AI testing is not handled
-
-#### spec/
-
-* Average look and feel of the api
-* Should work assuming a model and libraries are installed locally in working directory
-
-#### index.cc
-
-* The bridge between Node.js and C. Where the bindings are.
-
-#### prompt.cc
-
-* Handling prompting and inference of models in a threadsafe, asynchronous way.
-
-### Known Issues
-
-* why your model may be spewing bull 💩
- * The downloaded model is broken (just reinstall or download from official site)
-* Your model is hanging after a call to generate tokens.
-  * Is `nPast` set too high? This may cause your model to hang (observed 03/16/2024 on Linux Mint and Ubuntu 22.04)
-* Your GPU usage is still high after node.js exits.
- * Make sure to call `model.dispose()`!!!
-
-### Roadmap
-
-This package has been stabilizing over time, but breaking changes may still happen until the API stabilizes. Here's the todo list:
-
-* \[ ] Purely offline. Per the GUI, which can run completely offline, the bindings should be as well.
-* \[ ] NPM bundle size reduction via optionalDependencies strategy (need help)
- * Should include prebuilds to avoid painful node-gyp errors
-* \[x] createChatSession ( the python equivalent to create\_chat\_session )
-* \[x] generateTokens, the new name for createTokenStream. As of 3.2.0, this is released but not 100% tested. Check spec/generator.mjs!
-* \[x] ~~createTokenStream, an async iterator that streams each token emitted from the model. Planning on following this [example](https://github.com/nodejs/node-addon-examples/tree/main/threadsafe-async-iterator)~~ May not implement unless someone else can complete
-* \[x] prompt models via a threadsafe function in order to have proper non blocking behavior in nodejs
-* \[x] generateTokens is the new name for this^
-* \[x] proper unit testing (integrate with circle ci)
-* \[x] publish to npm under alpha tag `gpt4all@alpha`
-* \[x] have more people test on other platforms (mac tester needed)
-* \[x] switch to new pluggable backend
-
-### API Reference
-
-
-
-##### Table of Contents
-
-* [type](#type)
-* [TokenCallback](#tokencallback)
-* [ChatSessionOptions](#chatsessionoptions)
- * [systemPrompt](#systemprompt)
- * [messages](#messages)
-* [initialize](#initialize)
- * [Parameters](#parameters)
-* [generate](#generate)
- * [Parameters](#parameters-1)
-* [InferenceModel](#inferencemodel)
- * [createChatSession](#createchatsession)
- * [Parameters](#parameters-2)
- * [generate](#generate-1)
- * [Parameters](#parameters-3)
- * [dispose](#dispose)
-* [EmbeddingModel](#embeddingmodel)
- * [dispose](#dispose-1)
-* [InferenceResult](#inferenceresult)
-* [LLModel](#llmodel)
- * [constructor](#constructor)
- * [Parameters](#parameters-4)
- * [type](#type-1)
- * [name](#name)
- * [stateSize](#statesize)
- * [threadCount](#threadcount)
- * [setThreadCount](#setthreadcount)
- * [Parameters](#parameters-5)
- * [infer](#infer)
- * [Parameters](#parameters-6)
- * [embed](#embed)
- * [Parameters](#parameters-7)
- * [isModelLoaded](#ismodelloaded)
- * [setLibraryPath](#setlibrarypath)
- * [Parameters](#parameters-8)
- * [getLibraryPath](#getlibrarypath)
- * [initGpuByString](#initgpubystring)
- * [Parameters](#parameters-9)
- * [hasGpuDevice](#hasgpudevice)
- * [listGpu](#listgpu)
- * [Parameters](#parameters-10)
- * [dispose](#dispose-2)
-* [GpuDevice](#gpudevice)
- * [type](#type-2)
-* [LoadModelOptions](#loadmodeloptions)
- * [modelPath](#modelpath)
- * [librariesPath](#librariespath)
- * [modelConfigFile](#modelconfigfile)
- * [allowDownload](#allowdownload)
- * [verbose](#verbose)
- * [device](#device)
- * [nCtx](#nctx)
- * [ngl](#ngl)
-* [loadModel](#loadmodel)
- * [Parameters](#parameters-11)
-* [InferenceProvider](#inferenceprovider)
-* [createCompletion](#createcompletion)
- * [Parameters](#parameters-12)
-* [createCompletionStream](#createcompletionstream)
- * [Parameters](#parameters-13)
-* [createCompletionGenerator](#createcompletiongenerator)
- * [Parameters](#parameters-14)
-* [createEmbedding](#createembedding)
- * [Parameters](#parameters-15)
-* [CompletionOptions](#completionoptions)
- * [verbose](#verbose-1)
- * [onToken](#ontoken)
-* [Message](#message)
- * [role](#role)
- * [content](#content)
-* [prompt\_tokens](#prompt_tokens)
-* [completion\_tokens](#completion_tokens)
-* [total\_tokens](#total_tokens)
-* [n\_past\_tokens](#n_past_tokens)
-* [CompletionReturn](#completionreturn)
- * [model](#model)
- * [usage](#usage)
- * [message](#message-1)
-* [CompletionStreamReturn](#completionstreamreturn)
-* [LLModelPromptContext](#llmodelpromptcontext)
- * [logitsSize](#logitssize)
- * [tokensSize](#tokenssize)
- * [nPast](#npast)
- * [nPredict](#npredict)
- * [promptTemplate](#prompttemplate)
- * [nCtx](#nctx-1)
- * [topK](#topk)
- * [topP](#topp)
- * [minP](#minp)
- * [temperature](#temperature)
- * [nBatch](#nbatch)
- * [repeatPenalty](#repeatpenalty)
- * [repeatLastN](#repeatlastn)
- * [contextErase](#contexterase)
-* [DEFAULT\_DIRECTORY](#default_directory)
-* [DEFAULT\_LIBRARIES\_DIRECTORY](#default_libraries_directory)
-* [DEFAULT\_MODEL\_CONFIG](#default_model_config)
-* [DEFAULT\_PROMPT\_CONTEXT](#default_prompt_context)
-* [DEFAULT\_MODEL\_LIST\_URL](#default_model_list_url)
-* [downloadModel](#downloadmodel)
- * [Parameters](#parameters-16)
- * [Examples](#examples)
-* [DownloadModelOptions](#downloadmodeloptions)
- * [modelPath](#modelpath-1)
- * [verbose](#verbose-2)
- * [url](#url)
- * [md5sum](#md5sum)
-* [DownloadController](#downloadcontroller)
- * [cancel](#cancel)
- * [promise](#promise)
-
-#### type
-
-Model architecture. This argument currently does not have any functionality and is just used as a descriptive identifier for the user.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-#### TokenCallback
-
-Callback for controlling token generation. Return false to stop token generation.
-
-Type: function (tokenId: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), token: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String), total: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)): [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
-
-#### ChatSessionOptions
-
-**Extends Partial\**
-
-Options for the chat session.
-
-##### systemPrompt
-
-System prompt to ingest on initialization.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### messages
-
-Messages to ingest on initialization.
-
-Type: [Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array)<[Message](#message)>
-
-#### initialize
-
-Ingests system prompt and initial messages.
-Sets this chat session as the active chat session of the model.
-
-##### Parameters
-
-* `options` **[ChatSessionOptions](#chatsessionoptions)** The options for the chat session.
-
-Returns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)\**
-
-#### generate
-
-Prompts the model in chat-session context.
-
-##### Parameters
-
-* `prompt` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The prompt input.
-* `options` **[CompletionOptions](#completionoptions)?** Prompt context and other options.
-* `callback` **[TokenCallback](#tokencallback)?** Token generation callback.
-
-
-
-* Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the chat session is not the active chat session of the model.
-
-Returns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[CompletionReturn](#completionreturn)>** The model's response to the prompt.
-
-#### InferenceModel
-
-InferenceModel represents an LLM which can make chat predictions, similar to GPT transformers.
-
-##### createChatSession
-
-Create a chat session with the model.
-
-###### Parameters
-
-* `options` **[ChatSessionOptions](#chatsessionoptions)?** The options for the chat session.
-
-Returns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)\** The chat session.
-
-##### generate
-
-Prompts the model with a given input and optional parameters.
-
-###### Parameters
-
-* `prompt` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**
-* `options` **[CompletionOptions](#completionoptions)?** Prompt context and other options.
-* `callback` **[TokenCallback](#tokencallback)?** Token generation callback.
-* `input` The prompt input.
-
-Returns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[CompletionReturn](#completionreturn)>** The model's response to the prompt.
-
-##### dispose
-
-Delete and clean up the native model.
-
-Returns **void**
-
-#### EmbeddingModel
-
-EmbeddingModel represents an LLM which can create embeddings, which are float arrays
-
-##### dispose
-
-Delete and clean up the native model.
-
-Returns **void**
-
-#### InferenceResult
-
-Shape of LLModel's inference result.
-
-#### LLModel
-
-LLModel class representing a language model.
-This is a base class that provides common functionality for different types of language models.
-
-##### constructor
-
-Initialize a new LLModel.
-
-###### Parameters
-
-* `path` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** Absolute path to the model file.
-
-
-
-* Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model file does not exist.
-
-##### type
-
-undefined or user supplied
-
-Returns **([string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String) | [undefined](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/undefined))**
-
-##### name
-
-The name of the model.
-
-Returns **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**
-
-##### stateSize
-
-Get the size of the internal state of the model.
-NOTE: This state data is specific to the type of model you have created.
-
-Returns **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** the size in bytes of the internal state of the model
-
-##### threadCount
-
-Get the number of threads used for model inference.
-The default is the number of physical cores your computer has.
-
-Returns **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** The number of threads used for model inference.
-
-##### setThreadCount
-
-Set the number of threads used for model inference.
-
-###### Parameters
-
-* `newNumber` **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** The new number of threads.
-
-Returns **void**
-
-##### infer
-
-Prompt the model with a given input and optional parameters.
-This is the raw output from the model.
-Use the exported prompt function for a processed value.
-
-###### Parameters
-
-* `prompt` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The prompt input.
-* `promptContext` **Partial<[LLModelPromptContext](#llmodelpromptcontext)>** Optional parameters for the prompt context.
-* `callback` **[TokenCallback](#tokencallback)?** optional callback to control token generation.
-
-Returns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[InferenceResult](#inferenceresult)>** The result of the model prompt.
-
-##### embed
-
-Embed text with the model.
-
-###### Parameters
-
-* `text` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The text to embed.
-
-Returns **[Float32Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)** The embedding of the text.
-
-##### isModelLoaded
-
-Whether the model is loaded or not.
-
-Returns **[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)**
-
-##### setLibraryPath
-
-Where to search for the pluggable backend libraries.
-
-###### Parameters
-
-* `s` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**
-
-Returns **void**
-
-##### getLibraryPath
-
-Where to get the pluggable backend libraries.
-
-Returns **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**
-
-##### initGpuByString
-
-Initialize a GPU by a string identifier.
-
-###### Parameters
-
-* `memory_required` **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** The required memory in bytes. Must fit in a size\_t, or this will throw.
-* `device_name` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** 'amd' | 'nvidia' | 'intel' | 'gpu' | gpu name.
-  See LoadModelOptions.device for more information.
-
-Returns **[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)**
-
-##### hasGpuDevice
-
-Whether a GPU device was successfully initialized (from the C documentation).
-
-Returns **[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)** True if a GPU device is successfully initialized, false otherwise.
-
-##### listGpu
-
-GPUs that are usable for this LLModel
-
-###### Parameters
-
-* `nCtx` **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** Maximum size of context window
-
-
-
-* Throws **any** May be thrown if hasGpuDevice returns false.
-
-Returns **[Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array)<[GpuDevice](#gpudevice)>**
-
-##### dispose
-
-delete and cleanup the native model
-
-Returns **void**
-
-#### GpuDevice
-
-An object that contains GPU data for this machine.
-
-##### type
-
-Same as VkPhysicalDeviceType.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-#### LoadModelOptions
-
-Options that configure a model's behavior.
-
-##### modelPath
-
-Where to look for model files.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### librariesPath
-
-Where to look for the backend libraries.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### modelConfigFile
-
-The path to the model configuration file, useful for offline usage or custom model configurations.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### allowDownload
-
-Whether to allow downloading the model if it is not present at the specified path.
-
-Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
-
-##### verbose
-
-Enable verbose logging.
-
-Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
-
-##### device
-
-The processing unit on which the model will run. It can be set to:
-
-* "cpu": Model will run on the central processing unit.
-* "gpu": Model will run on the best available graphics processing unit, irrespective of its vendor.
-* "amd", "nvidia", "intel": Model will run on the best available GPU from the specified vendor.
-* "gpu name": Model will run on the GPU that matches the name if it's available.
- Note: If a GPU device lacks sufficient RAM to accommodate the model, an error will be thrown, and the GPT4All
- instance will be rendered invalid. It's advised to ensure the device has enough memory before initiating the
- model.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### nCtx
-
-The maximum context window size of this model.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### ngl
-
-The number of model layers to offload to the GPU.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-#### loadModel
-
-Loads a machine learning model with the specified name. This is the de facto way to create a model.
-By default this will download a model from the official GPT4All website if it is not present at the given path.
-
-##### Parameters
-
-* `modelName` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The name of the model to load.
-* `options` **([LoadModelOptions](#loadmodeloptions) | [undefined](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/undefined))?** (Optional) Additional options for loading the model.
-
-Returns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<([InferenceModel](#inferencemodel) | [EmbeddingModel](#embeddingmodel))>** A promise that resolves to an instance of the loaded LLModel.
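-
-##### Examples
-
-A minimal sketch (the model name is illustrative; this assumes an ESM context with top-level await):
-
-```javascript
-import { loadModel } from 'gpt4all'
-
-const model = await loadModel('orca-mini-3b-gguf2-q4_0.gguf', { verbose: true })
-```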
-
-#### InferenceProvider
-
-Interface for inference, implemented by InferenceModel and ChatSession.
-
-#### createCompletion
-
-The Node.js equivalent of the Python binding's chat\_completion.
-
-##### Parameters
-
-* `provider` **[InferenceProvider](#inferenceprovider)** The inference model object or chat session
-* `message` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The user input message
-* `options` **[CompletionOptions](#completionoptions)** The options for creating the completion.
-
-Returns **[CompletionReturn](#completionreturn)** The completion result.
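-
-##### Examples
-
-A minimal sketch, reusing the `model` from the loadModel example (in practice the call is typically awaited):
-
-```javascript
-const response = await createCompletion(model, 'Why are stars hot?')
-console.log(response.message)
-```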
-
-#### createCompletionStream
-
-Streaming variant of createCompletion, returns a stream of tokens and a promise that resolves to the completion result.
-
-##### Parameters
-
-* `provider` **[InferenceProvider](#inferenceprovider)** The inference model object or chat session
-* `message` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The user input message.
-* `options` **[CompletionOptions](#completionoptions)** The options for creating the completion.
-
-Returns **[CompletionStreamReturn](#completionstreamreturn)** An object of token stream and the completion result promise.
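-
-##### Examples
-
-A minimal sketch, assuming the returned object exposes a `tokens` stream and a `result` promise:
-
-```javascript
-const stream = createCompletionStream(model, 'Tell me a short story.')
-stream.tokens.on('data', (chunk) => process.stdout.write(chunk))
-const completion = await stream.result
-```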
-
-#### createCompletionGenerator
-
-Creates an async generator of tokens.
-
-##### Parameters
-
-* `provider` **[InferenceProvider](#inferenceprovider)** The inference model object or chat session
-* `message` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The user input message.
-* `options` **[CompletionOptions](#completionoptions)** The options for creating the completion.
-
-Returns **AsyncGenerator<[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)>** The stream of generated tokens
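-
-##### Examples
-
-A minimal sketch, consuming the generator with `for await`:
-
-```javascript
-for await (const token of createCompletionGenerator(model, 'Count to five.')) {
-    process.stdout.write(token)
-}
-```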
-
-#### createEmbedding
-
-The Node.js equivalent of the Python binding's Embed4All().embed().
-
-##### Parameters
-
-* `model` **[EmbeddingModel](#embeddingmodel)** The language model object.
-* `text` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The text to embed.
-
-Returns **[Float32Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)** The embedding of the text.
-
-#### CompletionOptions
-
-**Extends Partial<[LLModelPromptContext](#llmodelpromptcontext)>**
-
-The options for creating the completion.
-
-##### verbose
-
-Indicates if verbose logging is enabled.
-
-Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
-
-##### onToken
-
-Callback for controlling token generation. Return false to stop processing.
-
-Type: [TokenCallback](#tokencallback)
-
-#### Message
-
-A message in the conversation.
-
-##### role
-
-The role of the message.
-
-Type: (`"system"` | `"assistant"` | `"user"`)
-
-##### content
-
-The message content.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-#### prompt\_tokens
-
-The number of tokens used in the prompt. Currently not available and always 0.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-#### completion\_tokens
-
-The number of tokens used in the completion.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-#### total\_tokens
-
-The total number of tokens used. Currently not available and always 0.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-#### n\_past\_tokens
-
-Number of tokens used in the conversation.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-#### CompletionReturn
-
-The result of a completion.
-
-##### model
-
-The model used for the completion.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### usage
-
-Token usage report.
-
-Type: {prompt\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), completion\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), total\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), n\_past\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)}
-
-##### message
-
-The generated completion.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-#### CompletionStreamReturn
-
-The result of a streamed completion, containing a stream of tokens and a promise that resolves to the completion result.
-
-#### LLModelPromptContext
-
-Model inference arguments for generating completions.
-
-##### logitsSize
-
-The size of the raw logits vector.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### tokensSize
-
-The size of the raw tokens vector.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### nPast
-
-The number of tokens in the past conversation.
-This controls how far back the model looks when generating completions.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### nPredict
-
-The maximum number of tokens to predict.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### promptTemplate
-
-Template for user / assistant message pairs.
-%1 is required and will be replaced by the user input.
-%2 is optional and will be replaced by the assistant response.
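-For example, a template might look like `### User:\n%1\n### Assistant:\n%2` (illustrative; the right template depends on the model).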
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### nCtx
-
-Deprecated: the context window size. This option has no effect here.
-Use loadModel's nCtx option instead.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### topK
-
-The top-k logits to sample from.
-Top-K sampling selects the next token only from the top K most likely tokens predicted by the model.
-It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit
-the diversity of the output. A higher value for top-K (e.g., 100) will consider more tokens and lead
-to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate
-more conservative text. 30-60 is a good range for most tasks.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### topP
-
-The nucleus sampling probability threshold.
-Top-P limits the selection of the next token to a subset of tokens with a cumulative probability
-above a threshold P. This method, also known as nucleus sampling, finds a balance between diversity
-and quality by considering both token probabilities and the number of tokens available for sampling.
-When using a higher value for top-P (e.g., 0.95), the generated text becomes more diverse.
-On the other hand, a lower value (e.g., 0.1) produces more focused and conservative text.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### minP
-
-The minimum probability for a token to be considered.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### temperature
-
-The temperature to adjust the model's output distribution.
-Temperature is like a knob that adjusts how creative or focused the output becomes. Higher temperatures
-(e.g., 1.2) increase randomness, resulting in more imaginative and diverse text. Lower temperatures (e.g., 0.5)
-make the output more focused, predictable, and conservative. When the temperature is set to 0, the output
-becomes completely deterministic, always selecting the most probable next token and producing identical results
-each time. A safe range is around 0.6-0.85, but you are free to experiment to find what value fits best.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### nBatch
-
-The number of prompt tokens processed in parallel (the prompt batch size).
-By splitting the prompt every N tokens, prompt-batch-size reduces RAM usage during processing. However,
-this can increase the processing time as a trade-off. If the N value is set too low (e.g., 10), long prompts
-with 500+ tokens will be most affected, requiring numerous processing runs to complete the prompt processing.
-To ensure optimal performance, setting the prompt-batch-size to 2048 allows processing of all tokens in a single run.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### repeatPenalty
-
-The penalty factor for repeated tokens.
-Repeat-penalty can help penalize tokens based on how frequently they occur in the text, including the input prompt.
-A token that has already appeared five times is penalized more heavily than a token that has appeared only one time.
-A value of 1 means that there is no penalty and values larger than 1 discourage repeated tokens.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### repeatLastN
-
-The number of last tokens to penalize.
-The repeat-penalty-tokens N option controls the number of tokens in the history to consider for penalizing repetition.
-A larger value will look further back in the generated text to prevent repetitions, while a smaller value will only
-consider recent tokens.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-##### contextErase
-
-The fraction of context to erase if the context window is exceeded.
-
-Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
-
-#### DEFAULT\_DIRECTORY
-
-From the Python API:
-models will be stored in `(homedir)/.cache/gpt4all/`
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-#### DEFAULT\_LIBRARIES\_DIRECTORY
-
-From the Python API:
-The default path for dynamic libraries to be stored.
-You may separate paths by a semicolon to search in multiple areas.
-This searches DEFAULT\_DIRECTORY/libraries, cwd/libraries, and finally cwd.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-#### DEFAULT\_MODEL\_CONFIG
-
-Default model configuration.
-
-Type: ModelConfig
-
-#### DEFAULT\_PROMPT\_CONTEXT
-
-Default prompt context.
-
-Type: [LLModelPromptContext](#llmodelpromptcontext)
-
-#### DEFAULT\_MODEL\_LIST\_URL
-
-Default model list URL.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-#### downloadModel
-
-Initiates the download of a model file.
-By default this starts the download without waiting for completion. Use the returned controller to alter this behavior.
-
-##### Parameters
-
-* `modelName` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The model to be downloaded.
-* `options` **[DownloadModelOptions](#downloadmodeloptions)** to pass into the downloader. Default is { modelPath: (cwd), verbose: false }.
-
-##### Examples
-
-```javascript
-const download = downloadModel('ggml-gpt4all-j-v1.3-groovy.bin')
-download.promise.then(() => console.log('Downloaded!'))
-```
-
-* Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model already exists in the specified location.
-* Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model cannot be found at the specified url.
-
-Returns **[DownloadController](#downloadcontroller)** object that allows controlling the download process.
-
-#### DownloadModelOptions
-
-Options for the model download process.
-
-##### modelPath
-
-Location in which to download the model.
-Default is process.cwd(), the current working directory.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### verbose
-
-Debug mode: reports how long the download took, in seconds.
-
-Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
-
-##### url
-
-Remote download url. Defaults to `https://gpt4all.io/models/gguf/`
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-##### md5sum
-
-MD5 sum of the model file. If this is provided, the downloaded file will be checked against this sum.
-If the sums do not match, an error will be thrown and the file will be deleted.
-
-Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
-
-#### DownloadController
-
-Model download controller.
-
-##### cancel
-
-Cancels the download when called.
-
-Type: function (): void
-
-##### promise
-
-A promise resolving to the downloaded model's config once the download is done.
-
-Type: [Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<ModelConfig>
diff --git a/gpt4all-bindings/python/docs/old/gpt4all_python.md b/gpt4all-bindings/python/docs/old/gpt4all_python.md
deleted file mode 100644
index b578bcd9..00000000
--- a/gpt4all-bindings/python/docs/old/gpt4all_python.md
+++ /dev/null
@@ -1,268 +0,0 @@
-# GPT4All Python Generation API
-The `GPT4All` python package provides bindings to our C/C++ model backend libraries.
-The source code and local build instructions can be found [here](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python).
-
-
-## Quickstart
-```bash
-pip install gpt4all
-```
-
-``` py
-from gpt4all import GPT4All
-model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
-```
-
-This will:
-
-- Instantiate `GPT4All`, which is the primary public API to your large language model (LLM).
-- Automatically download the given model to `~/.cache/gpt4all/` if not already present.
-
-Read further to see how to chat with this model.
-
-
-### Chatting with GPT4All
-To start chatting with a local LLM, you will need to start a chat session. Within a chat session, the model will be
-prompted with the appropriate template, and history will be preserved between successive calls to `generate()`.
-
-=== "GPT4All Example"
- ``` py
- model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
- with model.chat_session():
- response1 = model.generate(prompt='hello', temp=0)
- response2 = model.generate(prompt='write me a short poem', temp=0)
- response3 = model.generate(prompt='thank you', temp=0)
- print(model.current_chat_session)
- ```
-=== "Output"
- ``` json
- [
- {
- 'role': 'user',
- 'content': 'hello'
- },
- {
- 'role': 'assistant',
- 'content': 'What is your name?'
- },
- {
- 'role': 'user',
- 'content': 'write me a short poem'
- },
- {
- 'role': 'assistant',
- 'content': "I would love to help you with that! Here's a short poem I came up with:\nBeneath the autumn leaves,\nThe wind whispers through the trees.\nA gentle breeze, so at ease,\nAs if it were born to play.\nAnd as the sun sets in the sky,\nThe world around us grows still."
- },
- {
- 'role': 'user',
- 'content': 'thank you'
- },
- {
- 'role': 'assistant',
- 'content': "You're welcome! I hope this poem was helpful or inspiring for you. Let me know if there is anything else I can assist you with."
- }
- ]
- ```
-
-When using GPT4All models in the `chat_session()` context:
-
-- Consecutive chat exchanges are taken into account and not discarded until the session ends, as long as the model has capacity.
-- A system prompt is inserted into the beginning of the model's context.
-- Each prompt passed to `generate()` is wrapped in the appropriate prompt template. If you pass `allow_download=False`
- to GPT4All or are using a model that is not from the official models list, you must pass a prompt template using the
- `prompt_template` parameter of `chat_session()`.
-
-NOTE: If you do not use `chat_session()`, calls to `generate()` will not be wrapped in a prompt template. This will
-cause the model to *continue* the prompt instead of *answering* it. When in doubt, use a chat session, as many newer
-models are designed to be used exclusively with a prompt template.
-
-
-### Streaming Generations
-To interact with GPT4All responses as the model generates, use the `streaming=True` flag during generation.
-
-=== "GPT4All Streaming Example"
- ``` py
- from gpt4all import GPT4All
- model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
- tokens = []
- with model.chat_session():
- for token in model.generate("What is the capital of France?", streaming=True):
- tokens.append(token)
- print(tokens)
- ```
-=== "Output"
- ```
- [' The', ' capital', ' of', ' France', ' is', ' Paris', '.']
- ```
-
-
-### The Generate Method API
-::: gpt4all.gpt4all.GPT4All.generate
-
-
-## Examples & Explanations
-### Influencing Generation
-The three most influential parameters in generation are _Temperature_ (`temp`), _Top-p_ (`top_p`) and _Top-K_ (`top_k`).
-In a nutshell, during the process of selecting the next token, not just one or a few are considered, but every single
-token in the vocabulary is given a probability. The parameters can change the field of candidate tokens.
-
-- **Temperature** makes the process either more or less random. A _Temperature_ above 1 increasingly "levels the playing
- field", while at a _Temperature_ between 0 and 1 the likelihood of the best token candidates grows even more. A
- _Temperature_ of 0 results in selecting the best token, making the output deterministic. A _Temperature_ of 1
- represents a neutral setting with regard to randomness in the process.
-
-- _Top-p_ and _Top-K_ both narrow the field:
- - **Top-K** limits candidate tokens to a fixed number after sorting by probability. Setting it higher than the
- vocabulary size deactivates this limit.
- - **Top-p** selects tokens based on their total probabilities. For example, a value of 0.8 means "include the best
- tokens, whose accumulated probabilities reach or just surpass 80%". Setting _Top-p_ to 1, which is 100%,
- effectively disables it.
-
-The recommendation is to keep at least one of _Top-K_ and _Top-p_ active. Other parameters can also influence
-generation; be sure to review all their descriptions.
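-
-For instance, a minimal sketch (the model name and parameter values are only illustrative):
-
-``` py
-from gpt4all import GPT4All
-
-model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')
-with model.chat_session():
-    # narrow candidate field, low randomness: focused, near-deterministic output
-    print(model.generate('Name three colors.', temp=0.2, top_k=20, top_p=0.5))
-    # wider candidate field, more randomness: more varied output
-    print(model.generate('Name three colors.', temp=1.1, top_k=100, top_p=0.95))
-```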
-
-
-### Specifying the Model Folder
-The model folder can be set with the `model_path` parameter when creating a `GPT4All` instance. The example below
-is the same as if it weren't provided; that is, `~/.cache/gpt4all/` is the default folder.
-
-``` py
-from pathlib import Path
-from gpt4all import GPT4All
-model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf', model_path=Path.home() / '.cache' / 'gpt4all')
-```
-
-If you want to point it at the chat GUI's default folder, it should be:
-=== "macOS"
- ``` py
- from pathlib import Path
- from gpt4all import GPT4All
-
- model_name = 'orca-mini-3b-gguf2-q4_0.gguf'
- model_path = Path.home() / 'Library' / 'Application Support' / 'nomic.ai' / 'GPT4All'
- model = GPT4All(model_name, model_path)
- ```
-=== "Windows"
- ``` py
- from pathlib import Path
- from gpt4all import GPT4All
- import os
- model_name = 'orca-mini-3b-gguf2-q4_0.gguf'
- model_path = Path(os.environ['LOCALAPPDATA']) / 'nomic.ai' / 'GPT4All'
- model = GPT4All(model_name, model_path)
- ```
-=== "Linux"
- ``` py
- from pathlib import Path
- from gpt4all import GPT4All
-
- model_name = 'orca-mini-3b-gguf2-q4_0.gguf'
- model_path = Path.home() / '.local' / 'share' / 'nomic.ai' / 'GPT4All'
- model = GPT4All(model_name, model_path)
- ```
-
-Alternatively, you could also change the module's default model directory:
-
-``` py
-from pathlib import Path
-from gpt4all import GPT4All, gpt4all
-gpt4all.DEFAULT_MODEL_DIRECTORY = Path.home() / 'my' / 'models-directory'
-model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')
-```
-
-
-### Managing Templates
-When using a `chat_session()`, you may customize the system prompt, and set the prompt template if necessary:
-
-=== "GPT4All Custom Session Templates Example"
- ``` py
- from gpt4all import GPT4All
- model = GPT4All('wizardlm-13b-v1.2.Q4_0.gguf')
- system_template = 'A chat between a curious user and an artificial intelligence assistant.\n'
-    # many models use a triple hash '###' for keywords; Vicuna-style models are simpler:
- prompt_template = 'USER: {0}\nASSISTANT: '
- with model.chat_session(system_template, prompt_template):
- response1 = model.generate('why is the grass green?')
- print(response1)
- print()
- response2 = model.generate('why is the sky blue?')
- print(response2)
- ```
-=== "Possible Output"
- ```
- The color of grass can be attributed to its chlorophyll content, which allows it
- to absorb light energy from sunlight through photosynthesis. Chlorophyll absorbs
- blue and red wavelengths of light while reflecting other colors such as yellow
- and green. This is why the leaves appear green to our eyes.
-
- The color of the sky appears blue due to a phenomenon called Rayleigh scattering,
- which occurs when sunlight enters Earth's atmosphere and interacts with air
- molecules such as nitrogen and oxygen. Blue light has shorter wavelength than
- other colors in the visible spectrum, so it is scattered more easily by these
- particles, making the sky appear blue to our eyes.
- ```
-
-
-### Without Online Connectivity
-To prevent GPT4All from accessing online resources, instantiate it with `allow_download=False`. When using this flag,
-there will be no system prompt by default, and you must specify the prompt template yourself.
-
-You can retrieve a model's default system prompt and prompt template with an online instance of GPT4All:
-
-=== "Prompt Template Retrieval"
- ``` py
- from gpt4all import GPT4All
- model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')
- print(repr(model.config['systemPrompt']))
- print(repr(model.config['promptTemplate']))
- ```
-=== "Output"
- ```py
- '### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n'
- '### User:\n{0}\n### Response:\n'
- ```
-
-Then you can pass them explicitly when creating an offline instance:
-
-``` py
-from gpt4all import GPT4All
-model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf', allow_download=False)
-
-system_prompt = '### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n'
-prompt_template = '### User:\n{0}\n\n### Response:\n'
-
-with model.chat_session(system_prompt=system_prompt, prompt_template=prompt_template):
- ...
-```
-
-### Interrupting Generation
-The simplest way to stop generation is to set a fixed upper limit with the `max_tokens` parameter.
-
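-For example, to cap a response at 50 tokens (a minimal sketch):
-
-``` py
-from gpt4all import GPT4All
-
-model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')
-with model.chat_session():
-    # generation stops after at most 50 tokens, even mid-sentence
-    print(model.generate('Describe the ocean.', max_tokens=50))
-```
-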
-If you know exactly when a model should stop responding, you can add a custom callback, like so:
-
-=== "GPT4All Custom Stop Callback"
- ``` py
- from gpt4all import GPT4All
- model = GPT4All('orca-mini-3b-gguf2-q4_0.gguf')
-
- def stop_on_token_callback(token_id, token_string):
- # one sentence is enough:
- if '.' in token_string:
- return False
- else:
- return True
-
- response = model.generate('Blue Whales are the biggest animal to ever inhabit the Earth.',
- temp=0, callback=stop_on_token_callback)
- print(response)
- ```
-=== "Output"
- ```
- They can grow up to 100 feet (30 meters) long and weigh as much as 20 tons (18 metric tons).
- ```
-
-
-## API Documentation
-::: gpt4all.gpt4all.GPT4All
diff --git a/gpt4all-bindings/python/docs/old/gpt4all_python_embedding.md b/gpt4all-bindings/python/docs/old/gpt4all_python_embedding.md
deleted file mode 100644
index a74847cf..00000000
--- a/gpt4all-bindings/python/docs/old/gpt4all_python_embedding.md
+++ /dev/null
@@ -1,176 +0,0 @@
-# Embeddings
-GPT4All supports generating high quality embeddings of arbitrary length text using any embedding model supported by llama.cpp.
-
-An embedding is a vector representation of a piece of text. Embeddings are useful for tasks such as retrieval for
-question answering (including retrieval augmented generation or *RAG*), semantic similarity search, classification, and
-topic clustering.
-
-## Supported Embedding Models
-
-The following models have built-in support in Embed4All:
-
-| Name | Embed4All `model_name` | Context Length | Embedding Length | File Size |
-|--------------------|------------------------------------------------------|---------------:|-----------------:|----------:|
-| [SBert] | all‑MiniLM‑L6‑v2.gguf2.f16.gguf | 512 | 384 | 44 MiB |
-| [Nomic Embed v1] | nomic‑embed‑text‑v1.f16.gguf | 2048 | 768 | 262 MiB |
-| [Nomic Embed v1.5] | nomic‑embed‑text‑v1.5.f16.gguf | 2048 | 64-768 | 262 MiB |
-
-The context length is the maximum number of word pieces, or *tokens*, that a model can embed at once. Embedding texts
-longer than a model's context length requires some kind of strategy; see [Embedding Longer Texts] for more information.
-
-The embedding length is the size of the vector returned by `Embed4All.embed`.
-
-[SBert]: https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2
-[Nomic Embed v1]: https://huggingface.co/nomic-ai/nomic-embed-text-v1
-[Nomic Embed v1.5]: https://huggingface.co/nomic-ai/nomic-embed-text-v1.5
-[Embedding Longer Texts]: #embedding-longer-texts
-
-## Quickstart
-```bash
-pip install gpt4all
-```
-
-### Generating Embeddings
-By default, embeddings will be generated on the CPU using all-MiniLM-L6-v2.
-
-=== "Embed4All Example"
- ```py
- from gpt4all import Embed4All
- text = 'The quick brown fox jumps over the lazy dog'
- embedder = Embed4All()
- output = embedder.embed(text)
- print(output)
- ```
-=== "Output"
- ```
- [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...]
- ```
-
-You can also use the GPU to accelerate the embedding model by specifying the `device` parameter. See the [GPT4All
-constructor] for more information.
-
-=== "GPU Example"
- ```py
- from gpt4all import Embed4All
- text = 'The quick brown fox jumps over the lazy dog'
- embedder = Embed4All(device='gpu')
- output = embedder.embed(text)
- print(output)
- ```
-=== "Output"
- ```
- [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...]
- ```
-
-[GPT4All constructor]: gpt4all_python.md#gpt4all.gpt4all.GPT4All.__init__
-
-### Nomic Embed
-
-Embed4All has built-in support for Nomic's open-source embedding model, [Nomic Embed]. When using this model, you must
-specify the task type using the `prefix` argument. This may be one of `search_query`, `search_document`,
-`classification`, or `clustering`. For retrieval applications, you should use the `search_document` prefix for all
-of your documents and the `search_query` prefix for your queries. See the [Nomic Embedding Guide] for more info.
-
-=== "Nomic Embed Example"
- ```py
- from gpt4all import Embed4All
- text = 'Who is Laurens van der Maaten?'
- embedder = Embed4All('nomic-embed-text-v1.f16.gguf')
- output = embedder.embed(text, prefix='search_query')
- print(output)
- ```
-=== "Output"
- ```
- [-0.013357644900679588, 0.027070969343185425, -0.0232995692640543, ...]
- ```
-
-[Nomic Embed]: https://blog.nomic.ai/posts/nomic-embed-text-v1
-[Nomic Embedding Guide]: https://docs.nomic.ai/atlas/guides/embeddings#embedding-task-types
-
-### Embedding Longer Texts
-
-Embed4All accepts a parameter called `long_text_mode`. This controls the behavior of Embed4All for texts longer than the
-context length of the embedding model.
-
-In the default mode of "mean", Embed4All will break long inputs into chunks and average their embeddings to compute the
-final result.
-
-To change this behavior, you can set the `long_text_mode` parameter to "truncate", which will truncate the input to the
-sequence length of the model before generating a single embedding.
-
-=== "Truncation Example"
- ```py
- from gpt4all import Embed4All
- text = 'The ' * 512 + 'The quick brown fox jumps over the lazy dog'
- embedder = Embed4All()
- output = embedder.embed(text, long_text_mode="mean")
- print(output)
- print()
- output = embedder.embed(text, long_text_mode="truncate")
- print(output)
- ```
-=== "Output"
- ```
- [0.0039850445464253426, 0.04558328539133072, 0.0035536508075892925, ...]
-
- [-0.009771130047738552, 0.034792833030223846, -0.013273917138576508, ...]
- ```
-
-
-### Batching
-
-You can send multiple texts to Embed4All in a single call. This can give faster results when individual texts are
-significantly smaller than `n_ctx` tokens. (`n_ctx` defaults to 2048.)
-
-=== "Batching Example"
- ```py
- from gpt4all import Embed4All
- texts = ['The quick brown fox jumps over the lazy dog', 'Foo bar baz']
- embedder = Embed4All()
- output = embedder.embed(texts)
- print(output[0])
- print()
- print(output[1])
- ```
-=== "Output"
- ```
- [0.03551332652568817, 0.06137588247656822, 0.05281158909201622, ...]
-
- [-0.03879690542817116, 0.00013223080895841122, 0.023148687556385994, ...]
- ```
-
-The number of texts that can be embedded in one pass of the model is proportional to the `n_ctx` parameter of Embed4All.
-Increasing it may increase batched embedding throughput if you have a fast GPU, at the cost of VRAM.
-```py
-embedder = Embed4All(n_ctx=4096, device='gpu')
-```
-
-
-### Resizable Dimensionality
-
-The embedding dimension of Nomic Embed v1.5 can be resized using the `dimensionality` parameter. This parameter supports
-any value between 64 and 768.
-
-Shorter embeddings use less storage, memory, and bandwidth with a small performance cost. See the [blog post] for more
-info.
-
-[blog post]: https://blog.nomic.ai/posts/nomic-embed-matryoshka
-
-=== "Matryoshka Example"
- ```py
- from gpt4all import Embed4All
- text = 'The quick brown fox jumps over the lazy dog'
- embedder = Embed4All('nomic-embed-text-v1.5.f16.gguf')
- output = embedder.embed(text, dimensionality=64)
- print(len(output))
- print(output)
- ```
-=== "Output"
- ```
- 64
- [-0.03567073494195938, 0.1301717758178711, -0.4333043396472931, ...]
- ```
-
-
-### API documentation
-::: gpt4all.gpt4all.Embed4All
diff --git a/gpt4all-bindings/python/docs/old/index.md b/gpt4all-bindings/python/docs/old/index.md
deleted file mode 100644
index 3cba28b6..00000000
--- a/gpt4all-bindings/python/docs/old/index.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# GPT4All
-Welcome to the GPT4All documentation.
-
-GPT4All is an open-source software ecosystem for anyone to run large language models (LLMs) **privately** on **everyday laptop & desktop computers**. No API calls or GPUs required.
-
-The GPT4All Desktop Application is a touchpoint to interact with LLMs and integrate them with your local docs & local data for RAG (retrieval-augmented generation). No coding is required, just install the application, download the models of your choice, and you are ready to use your LLM.
-
-Your local data is **yours**. GPT4All handles the retrieval privately and on-device to fetch relevant data to support your queries to your LLM.
-
-Nomic AI oversees contributions to GPT4All to ensure quality, security, and maintainability. Additionally, Nomic AI has open-sourced code for training and deploying your own customized LLMs internally.
-
-GPT4All software is optimized to run inference of 3-13 billion parameter large language models on the CPUs of laptops, desktops and servers.
-
-=== "GPT4All Example"
- ``` py
- from gpt4all import GPT4All
- model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
- output = model.generate("The capital of France is ", max_tokens=3)
- print(output)
- ```
-=== "Output"
- ```
- 1. Paris
- ```
-
-See [Python Bindings](gpt4all_python.md) to use GPT4All.
-
-### Navigating the Documentation
-In an effort to ensure cross-operating-system and cross-language compatibility, the [GPT4All software ecosystem](https://github.com/nomic-ai/gpt4all)
-is organized as a monorepo with the following structure:
-
-- **gpt4all-backend**: The GPT4All backend maintains and exposes a universal, performance optimized C API for running inference with multi-billion parameter Transformer Decoders.
-This C API is then bound to any higher level programming language such as C++, Python, Go, etc.
-- **gpt4all-bindings**: GPT4All bindings contain a variety of high-level programming languages that implement the C API. Each directory is a bound programming language. The [CLI](gpt4all_cli.md) is included here, as well.
-- **gpt4all-chat**: GPT4All Chat is an OS native chat application that runs on macOS, Windows and Linux. It is the easiest way to run local, privacy aware chat assistants on everyday hardware. You can download it on the [GPT4All Website](https://gpt4all.io) and read its source code in the monorepo.
-
-Explore detailed documentation for the backend, bindings and chat client in the sidebar.
-
-## Models
-The GPT4All software ecosystem is compatible with the following Transformer architectures:
-
-- `Falcon`
-- `LLaMA` (including `OpenLLaMA`)
-- `MPT` (including `Replit`)
-- `GPT-J`
-
-You can find an exhaustive list of supported models on the [website](https://gpt4all.io) or in the [models directory](https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json).
-
-
-GPT4All models are artifacts produced through a process known as neural network quantization.
-A multi-billion parameter Transformer Decoder usually takes 30+ GB of VRAM to execute a forward pass.
-Most people do not have such a powerful computer or access to GPU hardware. By running trained LLMs through quantization algorithms,
-some GPT4All models can run on your laptop using only 4-8 GB of RAM, enabling their widespread usage.
-Bigger models might still require more RAM, however.
-
-Any model trained with one of these architectures can be quantized and run locally with all GPT4All bindings and in the
-chat client. You can add new variants by contributing to the gpt4all-backend.
-
-## Frequently Asked Questions
-Find answers to frequently asked questions by searching the [Github issues](https://github.com/nomic-ai/gpt4all/issues) or in the [documentation FAQ](gpt4all_faq.md).
-
-## Getting the most out of your local LLM
-
-**Inference Speed:**
-The inference speed of a local LLM depends on two factors: model size and the number of tokens given as input.
-It is not advised to prompt local LLMs with large chunks of context, as their inference speed will degrade heavily.
-You will likely want to run GPT4All models on a GPU if you would like to utilize context windows larger than 750 tokens. Native GPU support for GPT4All models is planned.
-
-**Inference Performance:**
-Which model is best? That depends on your use case. The ability of an LLM to faithfully follow instructions is conditioned
-on the quantity and diversity of its pre-training data and on the diversity, quality and factuality of the data it
-was fine-tuned on. A goal of GPT4All is to bring the most powerful local assistant model to your desktop, and Nomic AI is actively
-working to improve its performance and quality.
diff --git a/gpt4all-bindings/python/gpt4all/__init__.py b/gpt4all-bindings/python/gpt4all/__init__.py
deleted file mode 100644
index 1952119c..00000000
--- a/gpt4all-bindings/python/gpt4all/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .gpt4all import CancellationError as CancellationError, Embed4All as Embed4All, GPT4All as GPT4All
diff --git a/gpt4all-bindings/python/gpt4all/_pyllmodel.py b/gpt4all-bindings/python/gpt4all/_pyllmodel.py
deleted file mode 100644
index 616ce80a..00000000
--- a/gpt4all-bindings/python/gpt4all/_pyllmodel.py
+++ /dev/null
@@ -1,616 +0,0 @@
-from __future__ import annotations
-
-import ctypes
-import os
-import platform
-import subprocess
-import sys
-import textwrap
-import threading
-from enum import Enum
-from queue import Queue
-from typing import TYPE_CHECKING, Any, Callable, Generic, Iterable, Iterator, Literal, NoReturn, TypeVar, overload
-
-if sys.version_info >= (3, 9):
- import importlib.resources as importlib_resources
-else:
- import importlib_resources
-
-if (3, 9) <= sys.version_info < (3, 11):
- # python 3.9 broke generic TypedDict, python 3.11 fixed it
- from typing_extensions import TypedDict
-else:
- from typing import TypedDict
-
-if TYPE_CHECKING:
- from typing_extensions import ParamSpec, TypeAlias
- T = TypeVar("T")
- P = ParamSpec("P")
-
-EmbeddingsType = TypeVar('EmbeddingsType', bound='list[Any]')
-
-cuda_found: bool = False
-
-
-# TODO(jared): use operator.call after we drop python 3.10 support
-def _operator_call(obj: Callable[P, T], /, *args: P.args, **kwargs: P.kwargs) -> T:
- return obj(*args, **kwargs)
-
-
-# Detect Rosetta 2
-@_operator_call
-def check_rosetta() -> None:
- if platform.system() == "Darwin" and platform.processor() == "i386":
- p = subprocess.run("sysctl -n sysctl.proc_translated".split(), capture_output=True, text=True)
- if p.returncode == 0 and p.stdout.strip() == "1":
- raise RuntimeError(textwrap.dedent("""\
- Running GPT4All under Rosetta is not supported due to CPU feature requirements.
- Please install GPT4All in an environment that uses a native ARM64 Python interpreter.
- """).strip())
-
-
-# Check for C++ runtime libraries
-if platform.system() == "Windows":
- try:
- ctypes.CDLL("msvcp140.dll")
- ctypes.CDLL("vcruntime140.dll")
- ctypes.CDLL("vcruntime140_1.dll")
- except OSError as e:
- print(textwrap.dedent(f"""\
- {e!r}
- The Microsoft Visual C++ runtime libraries were not found. Please install them from
- https://aka.ms/vs/17/release/vc_redist.x64.exe
- """), file=sys.stderr)
-
-
-@_operator_call
-def find_cuda() -> None:
- global cuda_found
-
- def _load_cuda(rtver: str, blasver: str) -> None:
- if platform.system() == "Linux":
- cudalib = f"lib/libcudart.so.{rtver}"
- cublaslib = f"lib/libcublas.so.{blasver}"
- else: # Windows
- cudalib = fr"bin\cudart64_{rtver.replace('.', '')}.dll"
- cublaslib = fr"bin\cublas64_{blasver}.dll"
-
- # preload the CUDA libs so the backend can find them
- ctypes.CDLL(os.path.join(cuda_runtime.__path__[0], cudalib), mode=ctypes.RTLD_GLOBAL)
- ctypes.CDLL(os.path.join(cublas.__path__[0], cublaslib), mode=ctypes.RTLD_GLOBAL)
-
- # Find CUDA libraries from the official packages
- if platform.system() in ("Linux", "Windows"):
- try:
- from nvidia import cuda_runtime, cublas
- except ImportError:
- pass # CUDA is optional
- else:
- for rtver, blasver in [("12", "12"), ("11.0", "11")]:
- try:
- _load_cuda(rtver, blasver)
-                    cuda_found = True
-                    break  # stop after the first CUDA runtime that loads
- except OSError: # dlopen() does not give specific error codes
- pass # try the next one
-
-
-# TODO: provide a config file to make this more robust
-MODEL_LIB_PATH = importlib_resources.files("gpt4all") / "llmodel_DO_NOT_MODIFY" / "build"
-
-
-def load_llmodel_library():
- ext = {"Darwin": "dylib", "Linux": "so", "Windows": "dll"}[platform.system()]
-
- try:
- # macOS, Linux, MinGW
- lib = ctypes.CDLL(str(MODEL_LIB_PATH / f"libllmodel.{ext}"))
- except FileNotFoundError:
- if ext != 'dll':
- raise
- # MSVC
- lib = ctypes.CDLL(str(MODEL_LIB_PATH / "llmodel.dll"))
-
- return lib
-
-
-llmodel = load_llmodel_library()
-
-
-class LLModelPromptContext(ctypes.Structure):
- _fields_ = [
- ("n_predict", ctypes.c_int32),
- ("top_k", ctypes.c_int32),
- ("top_p", ctypes.c_float),
- ("min_p", ctypes.c_float),
- ("temp", ctypes.c_float),
- ("n_batch", ctypes.c_int32),
- ("repeat_penalty", ctypes.c_float),
- ("repeat_last_n", ctypes.c_int32),
- ("context_erase", ctypes.c_float),
- ]
-
-
-class LLModelGPUDevice(ctypes.Structure):
- _fields_ = [
- ("backend", ctypes.c_char_p),
- ("index", ctypes.c_int32),
- ("type", ctypes.c_int32),
- ("heapSize", ctypes.c_size_t),
- ("name", ctypes.c_char_p),
- ("vendor", ctypes.c_char_p),
- ]
-
-
-# Define C function signatures using ctypes
-llmodel.llmodel_model_create.argtypes = [ctypes.c_char_p]
-llmodel.llmodel_model_create.restype = ctypes.c_void_p
-
-llmodel.llmodel_model_create2.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)]
-llmodel.llmodel_model_create2.restype = ctypes.c_void_p
-
-llmodel.llmodel_model_destroy.argtypes = [ctypes.c_void_p]
-llmodel.llmodel_model_destroy.restype = None
-
-# note: both functions take (model, model_path, n_ctx, ngl), matching the calls below
-llmodel.llmodel_loadModel.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int]
-llmodel.llmodel_loadModel.restype = ctypes.c_bool
-llmodel.llmodel_required_mem.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int]
-llmodel.llmodel_required_mem.restype = ctypes.c_size_t
-llmodel.llmodel_isModelLoaded.argtypes = [ctypes.c_void_p]
-llmodel.llmodel_isModelLoaded.restype = ctypes.c_bool
-
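-# Callback signatures, as used by llmodel_prompt / llmodel_embed below:
-#   PromptCallback(token_ids, n_token_ids, cached) -> continue?
-#   ResponseCallback(token_id, response_bytes) -> continue?
-#   EmbCancelCallback(batch_sizes, n_batch, backend_name) -> cancel?
-#   SpecialTokenCallback(name, token) -> None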
-PromptCallback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int32), ctypes.c_size_t, ctypes.c_bool)
-ResponseCallback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_int32, ctypes.c_char_p)
-EmbCancelCallback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_uint), ctypes.c_uint, ctypes.c_char_p)
-SpecialTokenCallback = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_char_p)
-
-llmodel.llmodel_prompt.argtypes = [
- ctypes.c_void_p,
- ctypes.c_char_p,
- PromptCallback,
- ResponseCallback,
- ctypes.POINTER(LLModelPromptContext),
- ctypes.POINTER(ctypes.c_char_p),
-]
-
-llmodel.llmodel_prompt.restype = ctypes.c_bool
-
-llmodel.llmodel_embed.argtypes = [
- ctypes.c_void_p,
- ctypes.POINTER(ctypes.c_char_p),
- ctypes.POINTER(ctypes.c_size_t),
- ctypes.c_char_p,
- ctypes.c_int,
- ctypes.POINTER(ctypes.c_size_t),
- ctypes.c_bool,
- ctypes.c_bool,
- EmbCancelCallback,
- ctypes.POINTER(ctypes.c_char_p),
-]
-
-llmodel.llmodel_embed.restype = ctypes.POINTER(ctypes.c_float)
-
-llmodel.llmodel_free_embedding.argtypes = [ctypes.POINTER(ctypes.c_float)]
-llmodel.llmodel_free_embedding.restype = None
-
-llmodel.llmodel_setThreadCount.argtypes = [ctypes.c_void_p, ctypes.c_int32]
-llmodel.llmodel_setThreadCount.restype = None
-
-llmodel.llmodel_set_implementation_search_path.argtypes = [ctypes.c_char_p]
-llmodel.llmodel_set_implementation_search_path.restype = None
-
-llmodel.llmodel_threadCount.argtypes = [ctypes.c_void_p]
-llmodel.llmodel_threadCount.restype = ctypes.c_int32
-
-llmodel.llmodel_set_implementation_search_path(str(MODEL_LIB_PATH).encode())
-
-llmodel.llmodel_available_gpu_devices.argtypes = [ctypes.c_size_t, ctypes.POINTER(ctypes.c_int32)]
-llmodel.llmodel_available_gpu_devices.restype = ctypes.POINTER(LLModelGPUDevice)
-
-llmodel.llmodel_gpu_init_gpu_device_by_string.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_char_p]
-llmodel.llmodel_gpu_init_gpu_device_by_string.restype = ctypes.c_bool
-
-llmodel.llmodel_gpu_init_gpu_device_by_struct.argtypes = [ctypes.c_void_p, ctypes.POINTER(LLModelGPUDevice)]
-llmodel.llmodel_gpu_init_gpu_device_by_struct.restype = ctypes.c_bool
-
-llmodel.llmodel_gpu_init_gpu_device_by_int.argtypes = [ctypes.c_void_p, ctypes.c_int32]
-llmodel.llmodel_gpu_init_gpu_device_by_int.restype = ctypes.c_bool
-
-llmodel.llmodel_model_backend_name.argtypes = [ctypes.c_void_p]
-llmodel.llmodel_model_backend_name.restype = ctypes.c_char_p
-
-llmodel.llmodel_model_gpu_device_name.argtypes = [ctypes.c_void_p]
-llmodel.llmodel_model_gpu_device_name.restype = ctypes.c_char_p
-
-llmodel.llmodel_count_prompt_tokens.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)]
-llmodel.llmodel_count_prompt_tokens.restype = ctypes.c_int32
-
-llmodel.llmodel_model_foreach_special_token.argtypes = [ctypes.c_void_p, SpecialTokenCallback]
-llmodel.llmodel_model_foreach_special_token.restype = None
-
-ResponseCallbackType = Callable[[int, str], bool]
-RawResponseCallbackType = Callable[[int, bytes], bool]
-EmbCancelCallbackType: TypeAlias = 'Callable[[list[int], str], bool]'
-
-
-def empty_response_callback(token_id: int, response: str) -> bool:
- return True
-
-
-# Symbol to terminate from generator
-class Sentinel(Enum):
- TERMINATING_SYMBOL = 0
-
-
-class EmbedResult(Generic[EmbeddingsType], TypedDict):
- embeddings: EmbeddingsType
- n_prompt_tokens: int
-
-
-class CancellationError(Exception):
- """raised when embedding is canceled"""
-
-
-class LLModel:
- """
- Base class and universal wrapper for GPT4All language models
- built around llmodel C-API.
-
- Parameters
- ----------
- model_path : str
- Path to the model.
- n_ctx : int
- Maximum size of context window
- ngl : int
- Number of GPU layers to use (Vulkan)
- backend : str
- Backend to use. One of 'auto', 'cpu', 'metal', 'kompute', or 'cuda'.
- """
-
- def __init__(self, model_path: str, n_ctx: int, ngl: int, backend: str):
- self.model_path = model_path.encode()
- self.n_ctx = n_ctx
- self.ngl = ngl
- self.buffer = bytearray()
- self.buff_expecting_cont_bytes: int = 0
-
- # Construct a model implementation
- err = ctypes.c_char_p()
- model = llmodel.llmodel_model_create2(self.model_path, backend.encode(), ctypes.byref(err))
- if model is None:
- s = err.value
- errmsg = 'null' if s is None else s.decode()
-
- if (
- backend == 'cuda'
- and not cuda_found
- and errmsg.startswith('Could not find any implementations for backend')
- ):
- print('WARNING: CUDA runtime libraries not found. Try `pip install "gpt4all[cuda]"`\n', file=sys.stderr)
-
- raise RuntimeError(f"Unable to instantiate model: {errmsg}")
- self.model: ctypes.c_void_p | None = model
- self.special_tokens_map: dict[str, str] = {}
- llmodel.llmodel_model_foreach_special_token(
- self.model, lambda n, t: self.special_tokens_map.__setitem__(n.decode(), t.decode()),
- )
-
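-    # llmodel is bound as a default argument so it remains reachable even if this
-    # destructor runs during interpreter shutdown, after module globals are cleared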
- def __del__(self, llmodel=llmodel):
- if hasattr(self, 'model'):
- self.close()
-
- def close(self) -> None:
- if self.model is not None:
- llmodel.llmodel_model_destroy(self.model)
- self.model = None
-
- def _raise_closed(self) -> NoReturn:
- raise ValueError("Attempted operation on a closed LLModel")
-
- @property
- def backend(self) -> Literal["cpu", "kompute", "cuda", "metal"]:
- if self.model is None:
- self._raise_closed()
- return llmodel.llmodel_model_backend_name(self.model).decode()
-
- @property
- def device(self) -> str | None:
- if self.model is None:
- self._raise_closed()
- dev = llmodel.llmodel_model_gpu_device_name(self.model)
- return None if dev is None else dev.decode()
-
- def count_prompt_tokens(self, prompt: str) -> int:
- if self.model is None:
- self._raise_closed()
- err = ctypes.c_char_p()
-        n_tok = llmodel.llmodel_count_prompt_tokens(self.model, prompt.encode(), ctypes.byref(err))
- if n_tok < 0:
- s = err.value
- errmsg = 'null' if s is None else s.decode()
- raise RuntimeError(f'Unable to count prompt tokens: {errmsg}')
- return n_tok
-
- @staticmethod
- def list_gpus(mem_required: int = 0) -> list[str]:
- """
- List the names of the available GPU devices with at least `mem_required` bytes of VRAM.
-
- Args:
- mem_required: The minimum amount of VRAM, in bytes
-
- Returns:
- A list of strings representing the names of the available GPU devices.
- """
- num_devices = ctypes.c_int32(0)
- devices_ptr = llmodel.llmodel_available_gpu_devices(mem_required, ctypes.byref(num_devices))
- if not devices_ptr:
- raise ValueError("Unable to retrieve available GPU devices")
- return [f'{d.backend.decode()}:{d.name.decode()}' for d in devices_ptr[:num_devices.value]]
-
- def init_gpu(self, device: str):
- if self.model is None:
- self._raise_closed()
-
- mem_required = llmodel.llmodel_required_mem(self.model, self.model_path, self.n_ctx, self.ngl)
-
- if llmodel.llmodel_gpu_init_gpu_device_by_string(self.model, mem_required, device.encode()):
- return
-
- all_gpus = self.list_gpus()
- available_gpus = self.list_gpus(mem_required)
- unavailable_gpus = [g for g in all_gpus if g not in available_gpus]
-
- error_msg = (f"Unable to initialize model on GPU: {device!r}" +
- f"\nAvailable GPUs: {available_gpus}")
- if unavailable_gpus:
- error_msg += f"\nUnavailable GPUs due to insufficient memory: {unavailable_gpus}"
- raise ValueError(error_msg)
-
- def load_model(self) -> bool:
- """
- Load model from a file.
-
- Returns
- -------
- True if model loaded successfully, False otherwise
- """
- if self.model is None:
- self._raise_closed()
-
- return llmodel.llmodel_loadModel(self.model, self.model_path, self.n_ctx, self.ngl)
-
- def set_thread_count(self, n_threads):
- if self.model is None:
- self._raise_closed()
- if not llmodel.llmodel_isModelLoaded(self.model):
- raise Exception("Model not loaded")
- llmodel.llmodel_setThreadCount(self.model, n_threads)
-
- def thread_count(self):
- if self.model is None:
- self._raise_closed()
- if not llmodel.llmodel_isModelLoaded(self.model):
- raise Exception("Model not loaded")
- return llmodel.llmodel_threadCount(self.model)
-
- @overload
- def generate_embeddings(
- self, text: str, prefix: str | None, dimensionality: int, do_mean: bool, atlas: bool,
- cancel_cb: EmbCancelCallbackType | None,
- ) -> EmbedResult[list[float]]: ...
- @overload
- def generate_embeddings(
- self, text: list[str], prefix: str | None, dimensionality: int, do_mean: bool, atlas: bool,
- cancel_cb: EmbCancelCallbackType | None,
- ) -> EmbedResult[list[list[float]]]: ...
- @overload
- def generate_embeddings(
- self, text: str | list[str], prefix: str | None, dimensionality: int, do_mean: bool, atlas: bool,
- cancel_cb: EmbCancelCallbackType | None,
- ) -> EmbedResult[list[Any]]: ...
-
- def generate_embeddings(
- self, text: str | list[str], prefix: str | None, dimensionality: int, do_mean: bool, atlas: bool,
- cancel_cb: EmbCancelCallbackType | None,
- ) -> EmbedResult[list[Any]]:
- if not text:
- raise ValueError("text must not be None or empty")
-
- if self.model is None:
- self._raise_closed()
-
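-        # normalize a single string to a list; single_text records that the caller
-        # passed one text so the result can be unwrapped at the end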
- if single_text := isinstance(text, str):
- text = [text]
-
- # prepare input
- embedding_size = ctypes.c_size_t()
- token_count = ctypes.c_size_t()
- error = ctypes.c_char_p()
- c_prefix = ctypes.c_char_p() if prefix is None else prefix.encode()
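-        # allocate one extra slot so the array is NULL-terminated for the C API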
- c_texts = (ctypes.c_char_p * (len(text) + 1))()
- for i, t in enumerate(text):
- c_texts[i] = t.encode()
-
- def wrap_cancel_cb(batch_sizes: Any, n_batch: int, backend: bytes) -> bool:
- assert cancel_cb is not None
- return cancel_cb(batch_sizes[:n_batch], backend.decode())
-
- cancel_cb_wrapper = EmbCancelCallback() if cancel_cb is None else EmbCancelCallback(wrap_cancel_cb)
-
- # generate the embeddings
- embedding_ptr = llmodel.llmodel_embed(
- self.model, c_texts, ctypes.byref(embedding_size), c_prefix, dimensionality, ctypes.byref(token_count),
- do_mean, atlas, cancel_cb_wrapper, ctypes.byref(error),
- )
-
- if not embedding_ptr:
- msg = "(unknown error)" if error.value is None else error.value.decode()
- if msg == "operation was canceled":
- raise CancellationError(msg)
- raise RuntimeError(f'Failed to generate embeddings: {msg}')
-
- # extract output
- n_embd = embedding_size.value // len(text)
- embedding_array = [
- embedding_ptr[i:i + n_embd]
- for i in range(0, embedding_size.value, n_embd)
- ]
- llmodel.llmodel_free_embedding(embedding_ptr)
-
- embeddings = embedding_array[0] if single_text else embedding_array
- return {'embeddings': embeddings, 'n_prompt_tokens': token_count.value}
-
- def prompt_model(
- self,
- prompt : str,
- callback : ResponseCallbackType,
- n_predict : int = 4096,
- top_k : int = 40,
- top_p : float = 0.9,
- min_p : float = 0.0,
- temp : float = 0.1,
- n_batch : int = 8,
- repeat_penalty : float = 1.2,
- repeat_last_n : int = 10,
- context_erase : float = 0.75,
- reset_context : bool = False,
- ):
- """
- Generate response from model from a prompt.
-
- Parameters
- ----------
- prompt: str
- Question, task, or conversation for model to respond to
- callback(token_id:int, response:str): bool
- The model sends response tokens to callback
-
- Returns
- -------
- None
- """
-
- if self.model is None:
- self._raise_closed()
-
- self.buffer.clear()
- self.buff_expecting_cont_bytes = 0
-
- context = LLModelPromptContext(
- n_predict = n_predict,
- top_k = top_k,
- top_p = top_p,
- min_p = min_p,
- temp = temp,
- n_batch = n_batch,
- repeat_penalty = repeat_penalty,
- repeat_last_n = repeat_last_n,
- context_erase = context_erase,
- )
-
- error_msg: bytes | None = None
- def error_callback(msg: bytes) -> None:
- nonlocal error_msg
- error_msg = msg
-
- err = ctypes.c_char_p()
- if not llmodel.llmodel_prompt(
- self.model,
- ctypes.c_char_p(prompt.encode()),
- PromptCallback(self._prompt_callback),
- ResponseCallback(self._callback_decoder(callback)),
- context,
- ctypes.byref(err),
- ):
- s = err.value
- raise RuntimeError(f"prompt error: {'null' if s is None else s.decode()}")
-
- def prompt_model_streaming(
- self, prompt: str, callback: ResponseCallbackType = empty_response_callback, **kwargs: Any,
- ) -> Iterator[str]:
- if self.model is None:
- self._raise_closed()
-
- output_queue: Queue[str | Sentinel] = Queue()
-
- # Put response tokens into an output queue
- def _generator_callback_wrapper(callback: ResponseCallbackType) -> ResponseCallbackType:
- def _generator_callback(token_id: int, response: str):
- nonlocal callback
-
- if callback(token_id, response):
- output_queue.put(response)
- return True
-
- return False
-
- return _generator_callback
-
- def run_llmodel_prompt(prompt: str, callback: ResponseCallbackType, **kwargs):
- self.prompt_model(prompt, callback, **kwargs)
- output_queue.put(Sentinel.TERMINATING_SYMBOL)
-
- # Kick off llmodel_prompt in separate thread so we can return generator
- # immediately
- thread = threading.Thread(
- target=run_llmodel_prompt,
- args=(prompt, _generator_callback_wrapper(callback)),
- kwargs=kwargs,
- )
- thread.start()
-
- # Generator
- while True:
- response = output_queue.get()
- if isinstance(response, Sentinel):
- break
- yield response
-
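`prompt_model_streaming` turns the callback-driven `prompt_model` into a generator: a worker thread runs the blocking call and feeds a queue, and the generator drains the queue until it sees the sentinel. A self-contained sketch of the same pattern, where the fake producer stands in for the blocking llmodel prompt call:

```python
import threading
from queue import Queue
from typing import Callable, Iterator

_DONE = object()  # sentinel marking the end of the stream

def as_generator(producer: Callable[[Callable[[str], None]], None]) -> Iterator[str]:
    """Run a blocking producer on a worker thread and yield what it emits."""
    q: Queue = Queue()

    def worker() -> None:
        producer(q.put)  # the producer calls emit(token) for each token
        q.put(_DONE)     # always terminate the stream

    threading.Thread(target=worker, daemon=True).start()
    while (item := q.get()) is not _DONE:
        yield item

# fake producer standing in for the blocking prompt call
print(list(as_generator(lambda emit: [emit(t) for t in ("Hello", ",", " world")])))
```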
- def _callback_decoder(self, callback: ResponseCallbackType) -> RawResponseCallbackType:
- def _raw_callback(token_id: int, response: bytes) -> bool:
- nonlocal self, callback
-
- decoded = []
-
- for byte in response:
-
- bits = "{:08b}".format(byte)
- (high_ones, _, _) = bits.partition('0')
-
- if len(high_ones) == 1:
- # continuation byte
- self.buffer.append(byte)
- self.buff_expecting_cont_bytes -= 1
-
- else:
- # beginning of a byte sequence
- if len(self.buffer) > 0:
- decoded.append(self.buffer.decode(errors='replace'))
-
- self.buffer.clear()
-
- self.buffer.append(byte)
- self.buff_expecting_cont_bytes = max(0, len(high_ones) - 1)
-
- if self.buff_expecting_cont_bytes <= 0:
- # received the whole sequence or an out of place continuation byte
- decoded.append(self.buffer.decode(errors='replace'))
-
- self.buffer.clear()
- self.buff_expecting_cont_bytes = 0
-
- if len(decoded) == 0 and self.buff_expecting_cont_bytes > 0:
- # wait for more continuation bytes
- return True
-
- return callback(token_id, ''.join(decoded))
-
- return _raw_callback
-
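`_callback_decoder` exists because the model may hand back a multi-byte UTF-8 character split across two callback invocations; the buffer holds the incomplete prefix until its continuation bytes arrive. Python's `codecs` module ships an incremental decoder that demonstrates the same idea in miniature:

```python
import codecs

decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")
# "é" is the two-byte sequence 0xC3 0xA9; feed the bytes in separate
# chunks, as a streaming token callback might receive them.
assert decoder.decode(b"caf\xc3") == "caf"  # 0xC3 is buffered, not emitted
assert decoder.decode(b"\xa9") == "é"       # continuation byte completes it
```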
- # Empty prompt callback
- @staticmethod
- def _prompt_callback(token_ids: ctypes._Pointer[ctypes.c_int32], n_token_ids: int, cached: bool) -> bool:
- return True
diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py
deleted file mode 100644
index 84b236c9..00000000
--- a/gpt4all-bindings/python/gpt4all/gpt4all.py
+++ /dev/null
@@ -1,674 +0,0 @@
-"""
-Python only API for running all GPT4All models.
-"""
-from __future__ import annotations
-
-import hashlib
-import json
-import os
-import platform
-import re
-import sys
-import warnings
-from contextlib import contextmanager
-from datetime import datetime
-from pathlib import Path
-from types import TracebackType
-from typing import TYPE_CHECKING, Any, Iterable, Iterator, Literal, NamedTuple, NoReturn, Protocol, TypedDict, overload
-
-import jinja2
-import requests
-from jinja2.sandbox import ImmutableSandboxedEnvironment
-from requests.exceptions import ChunkedEncodingError
-from tqdm import tqdm
-from urllib3.exceptions import IncompleteRead, ProtocolError
-
-from ._pyllmodel import (CancellationError as CancellationError, EmbCancelCallbackType, EmbedResult as EmbedResult,
- LLModel, ResponseCallbackType, _operator_call, empty_response_callback)
-
-if TYPE_CHECKING:
- from typing_extensions import Self, TypeAlias
-
-if sys.platform == "darwin":
- import fcntl
-
-# TODO: move to config
-DEFAULT_MODEL_DIRECTORY = Path.home() / ".cache" / "gpt4all"
-
-ConfigType: TypeAlias = "dict[str, Any]"
-
-# Environment setup adapted from HF transformers
-@_operator_call
-def _jinja_env() -> ImmutableSandboxedEnvironment:
- def raise_exception(message: str) -> NoReturn:
- raise jinja2.exceptions.TemplateError(message)
-
- def tojson(obj: Any, indent: int | None = None) -> str:
- return json.dumps(obj, ensure_ascii=False, indent=indent)
-
- def strftime_now(fmt: str) -> str:
- return datetime.now().strftime(fmt)
-
- env = ImmutableSandboxedEnvironment(trim_blocks=True, lstrip_blocks=True)
- env.filters["tojson" ] = tojson
- env.globals["raise_exception"] = raise_exception
- env.globals["strftime_now" ] = strftime_now
- return env
-
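This sandboxed environment is what renders chat templates later in the session code; `tojson` and `strftime_now` mirror helpers that HF-style templates expect. A minimal rendering sketch (the template string here is illustrative, not a real model template):

```python
from jinja2.sandbox import ImmutableSandboxedEnvironment

env = ImmutableSandboxedEnvironment(trim_blocks=True, lstrip_blocks=True)
template = env.from_string(
    "{% for m in messages %}<|{{ m.role }}|>{{ m.content }}\n{% endfor %}"
    "{% if add_generation_prompt %}<|assistant|>{% endif %}"
)
print(template.render(messages=[{"role": "user", "content": "hello"}],
                      add_generation_prompt=True))
# -> <|user|>hello
#    <|assistant|>
```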
-
-class MessageType(TypedDict):
- role: str
- content: str
-
-
-class ChatSession(NamedTuple):
- template: jinja2.Template
- history: list[MessageType]
-
-
-class Embed4All:
- """
- Python class that handles embeddings for GPT4All.
- """
-
- MIN_DIMENSIONALITY = 64
-
- def __init__(self, model_name: str | None = None, *, n_threads: int | None = None, device: str | None = None, **kwargs: Any):
- """
- Constructor
-
- Args:
- n_threads: number of CPU threads used by GPT4All. Default is None, in which case the number of threads is determined automatically.
- device: The processing unit on which the embedding model will run. See the `GPT4All` constructor for more info.
- kwargs: Remaining keyword arguments are passed to the `GPT4All` constructor.
- """
- if model_name is None:
- model_name = "all-MiniLM-L6-v2.gguf2.f16.gguf"
- self.gpt4all = GPT4All(model_name, n_threads=n_threads, device=device, **kwargs)
-
- def __enter__(self) -> Self:
- return self
-
- def __exit__(
- self, typ: type[BaseException] | None, value: BaseException | None, tb: TracebackType | None,
- ) -> None:
- self.close()
-
- def close(self) -> None:
- """Delete the model instance and free associated system resources."""
- self.gpt4all.close()
-
- # return_dict=False
- @overload
- def embed(
- self, text: str, *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,
- return_dict: Literal[False] = ..., atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,
- ) -> list[float]: ...
- @overload
- def embed(
- self, text: list[str], *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,
- return_dict: Literal[False] = ..., atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,
- ) -> list[list[float]]: ...
- @overload
- def embed(
- self, text: str | list[str], *, prefix: str | None = ..., dimensionality: int | None = ...,
- long_text_mode: str = ..., return_dict: Literal[False] = ..., atlas: bool = ...,
- cancel_cb: EmbCancelCallbackType | None = ...,
- ) -> list[Any]: ...
-
- # return_dict=True
- @overload
- def embed(
- self, text: str, *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,
- return_dict: Literal[True], atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,
- ) -> EmbedResult[list[float]]: ...
- @overload
- def embed(
- self, text: list[str], *, prefix: str | None = ..., dimensionality: int | None = ..., long_text_mode: str = ...,
- return_dict: Literal[True], atlas: bool = ..., cancel_cb: EmbCancelCallbackType | None = ...,
- ) -> EmbedResult[list[list[float]]]: ...
- @overload
- def embed(
- self, text: str | list[str], *, prefix: str | None = ..., dimensionality: int | None = ...,
- long_text_mode: str = ..., return_dict: Literal[True], atlas: bool = ...,
- cancel_cb: EmbCancelCallbackType | None = ...,
- ) -> EmbedResult[list[Any]]: ...
-
- # return type unknown
- @overload
- def embed(
- self, text: str | list[str], *, prefix: str | None = ..., dimensionality: int | None = ...,
- long_text_mode: str = ..., return_dict: bool = ..., atlas: bool = ...,
- cancel_cb: EmbCancelCallbackType | None = ...,
- ) -> Any: ...
-
- def embed(
- self, text: str | list[str], *, prefix: str | None = None, dimensionality: int | None = None,
- long_text_mode: str = "mean", return_dict: bool = False, atlas: bool = False,
- cancel_cb: EmbCancelCallbackType | None = None,
- ) -> Any:
- """
- Generate one or more embeddings.
-
- Args:
- text: A text or list of texts to generate embeddings for.
- prefix: The model-specific prefix representing the embedding task, without the trailing colon. For Nomic
- Embed, this can be `search_query`, `search_document`, `classification`, or `clustering`. Defaults to
- `search_document` or equivalent if known; otherwise, you must explicitly pass a prefix or an empty
- string if none applies.
- dimensionality: The embedding dimension, for use with Matryoshka-capable models. Defaults to full-size.
- long_text_mode: How to handle texts longer than the model can accept. One of `mean` or `truncate`.
- return_dict: Return the result as a dict that includes the number of prompt tokens processed.
- atlas: Try to be fully compatible with the Atlas API. Currently, this means texts longer than 8192 tokens
- with long_text_mode="mean" will raise an error. Disabled by default.
- cancel_cb: Called with arguments (batch_sizes, backend_name). Return true to cancel embedding.
-
- Returns:
- With return_dict=False, an embedding or list of embeddings of your text(s).
- With return_dict=True, a dict with keys 'embeddings' and 'n_prompt_tokens'.
-
- Raises:
- CancellationError: If cancel_cb returned True and embedding was canceled.
- """
- if dimensionality is None:
- dimensionality = -1
- else:
- if dimensionality <= 0:
- raise ValueError(f"Dimensionality must be None or a positive integer, got {dimensionality}")
- if dimensionality < self.MIN_DIMENSIONALITY:
- warnings.warn(
- f"Dimensionality {dimensionality} is less than the suggested minimum of {self.MIN_DIMENSIONALITY}."
- " Performance may be degraded."
- )
- try:
- do_mean = {"mean": True, "truncate": False}[long_text_mode]
- except KeyError:
- raise ValueError(f"Long text mode must be one of 'mean' or 'truncate', got {long_text_mode!r}")
- result = self.gpt4all.model.generate_embeddings(text, prefix, dimensionality, do_mean, atlas, cancel_cb)
- return result if return_dict else result["embeddings"]
-
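A short usage sketch of `embed` with the default embedding model (downloaded on first use):

```python
from gpt4all import Embed4All

with Embed4All() as embedder:  # defaults to all-MiniLM-L6-v2
    vec = embedder.embed("hello world")  # one text -> one vector
    result = embedder.embed(["first", "second"], return_dict=True)
    print(len(vec), len(result["embeddings"]), result["n_prompt_tokens"])
```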
-
-class GPT4All:
- """
- Python class that handles instantiation, downloading, generation and chat with GPT4All models.
- """
-
- def __init__(
- self,
- model_name: str,
- *,
- model_path: str | os.PathLike[str] | None = None,
- model_type: str | None = None,
- allow_download: bool = True,
- n_threads: int | None = None,
- device: str | None = None,
- n_ctx: int = 2048,
- ngl: int = 100,
- verbose: bool = False,
- ):
- """
- Constructor
-
- Args:
- model_name: Name of GPT4All or custom model. Including ".gguf" file extension is optional but encouraged.
- model_path: Path to directory containing model file or, if file does not exist, where to download model.
- Default is None, in which case models will be stored in `~/.cache/gpt4all/`.
- model_type: Model architecture. This argument currently does not have any functionality and is just used as
- a descriptive identifier for the user. Default is None.
- allow_download: Allow API to download models from gpt4all.io. Default is True.
- n_threads: number of CPU threads used by GPT4All. Default is None, in which case the number of threads is determined automatically.
- device: The processing unit on which the GPT4All model will run. It can be set to:
- - "cpu": Model will run on the central processing unit.
- - "gpu": Use Metal on ARM64 macOS, otherwise the same as "kompute".
- - "kompute": Use the best GPU provided by the Kompute backend.
- - "cuda": Use the best GPU provided by the CUDA backend.
- - "amd", "nvidia": Use the best GPU provided by the Kompute backend from this vendor.
- - A specific device name from the list returned by `GPT4All.list_gpus()`.
- Default is Metal on ARM64 macOS, "cpu" otherwise.
-
- Note: If a selected GPU device does not have sufficient RAM to accommodate the model, an error will be thrown, and the GPT4All instance will be rendered invalid. It's advised to ensure the device has enough memory before initiating the model.
- n_ctx: Maximum size of context window
- ngl: Number of GPU layers to use (Vulkan)
- verbose: If True, print debug messages.
- """
-
- self.model_type = model_type
- self._chat_session: ChatSession | None = None
-
- device_init = None
- if sys.platform == "darwin":
- if device is None:
- backend = "auto" # "auto" is effectively "metal" due to currently non-functional fallback
- elif device == "cpu":
- backend = "cpu"
- else:
- if platform.machine() != "arm64" or device != "gpu":
- raise ValueError(f"Unknown device for this platform: {device}")
- backend = "metal"
- else:
- backend = "kompute"
- if device is None or device == "cpu":
- pass # use kompute with no device
- elif device in ("cuda", "kompute"):
- backend = device
- device_init = "gpu"
- elif device.startswith("cuda:"):
- backend = "cuda"
- device_init = _remove_prefix(device, "cuda:")
- else:
- device_init = _remove_prefix(device, "kompute:")
-
- # Retrieve model and download if allowed
- self.config: ConfigType = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download, verbose=verbose)
- self.model = LLModel(self.config["path"], n_ctx, ngl, backend)
- if device_init is not None:
- self.model.init_gpu(device_init)
- self.model.load_model()
- # Set n_threads
- if n_threads is not None:
- self.model.set_thread_count(n_threads)
-
- def __enter__(self) -> Self:
- return self
-
- def __exit__(
- self, typ: type[BaseException] | None, value: BaseException | None, tb: TracebackType | None,
- ) -> None:
- self.close()
-
- def close(self) -> None:
- """Delete the model instance and free associated system resources."""
- self.model.close()
-
- @property
- def backend(self) -> Literal["cpu", "kompute", "cuda", "metal"]:
- """The name of the llama.cpp backend currently in use. One of "cpu", "kompute", "cuda", or "metal"."""
- return self.model.backend
-
- @property
- def device(self) -> str | None:
- """The name of the GPU device currently in use, or None for backends other than Kompute or CUDA."""
- return self.model.device
-
- @property
- def current_chat_session(self) -> list[MessageType] | None:
- return None if self._chat_session is None else self._chat_session.history
-
- @current_chat_session.setter
- def current_chat_session(self, history: list[MessageType]) -> None:
- if self._chat_session is None:
- raise ValueError("current_chat_session may only be set when there is an active chat session")
- self._chat_session.history[:] = history
-
- @staticmethod
- def list_models() -> list[ConfigType]:
- """
- Fetch model list from https://gpt4all.io/models/models3.json.
-
- Returns:
- Model list in JSON format.
- """
- resp = requests.get("https://gpt4all.io/models/models3.json")
- if resp.status_code != 200:
- raise ValueError(f"Request failed: HTTP {resp.status_code} {resp.reason}")
- return resp.json()
-
- @classmethod
- def retrieve_model(
- cls,
- model_name: str,
- model_path: str | os.PathLike[str] | None = None,
- allow_download: bool = True,
- verbose: bool = False,
- ) -> ConfigType:
- """
- Find model file, and if it doesn't exist, download the model.
-
- Args:
- model_name: Name of model.
- model_path: Path to find model. Default is None in which case path is set to
- ~/.cache/gpt4all/.
- allow_download: Allow API to download model from gpt4all.io. Default is True.
- verbose: If True, print debug messages. Default is False.
-
- Returns:
- Model config.
- """
-
- model_filename = append_extension_if_missing(model_name)
-
- # get the config for the model
- config: ConfigType = {}
- if allow_download:
- models = cls.list_models()
- if (model := next((m for m in models if m["filename"] == model_filename), None)) is not None:
- config.update(model)
-
- # Validate download directory
- if model_path is None:
- try:
- os.makedirs(DEFAULT_MODEL_DIRECTORY, exist_ok=True)
- except OSError as e:
- raise RuntimeError("Failed to create model download directory") from e
- model_path = DEFAULT_MODEL_DIRECTORY
- else:
- model_path = Path(model_path)
-
- if not model_path.exists():
- raise FileNotFoundError(f"Model directory does not exist: {model_path!r}")
-
- model_dest = model_path / model_filename
- if model_dest.exists():
- config["path"] = str(model_dest)
- if verbose:
- print(f"Found model file at {str(model_dest)!r}", file=sys.stderr)
- elif allow_download:
- # If model file does not exist, download
- filesize = config.get("filesize")
- config["path"] = str(cls.download_model(
- model_filename, model_path, verbose=verbose, url=config.get("url"),
- expected_size=None if filesize is None else int(filesize), expected_md5=config.get("md5sum"),
- ))
- else:
- raise FileNotFoundError(f"Model file does not exist: {model_dest!r}")
-
- return config
-
- @staticmethod
- def download_model(
- model_filename: str,
- model_path: str | os.PathLike[str],
- verbose: bool = True,
- url: str | None = None,
- expected_size: int | None = None,
- expected_md5: str | None = None,
- ) -> str | os.PathLike[str]:
- """
- Download model from gpt4all.io.
-
- Args:
- model_filename: Filename of model (with .gguf extension).
- model_path: Path to download model to.
- verbose: If True (default), print debug messages.
- url: The model's remote URL (e.g. it may be hosted on HF).
- expected_size: The expected size of the download.
- expected_md5: The expected MD5 hash of the download.
-
- Returns:
- Model file destination.
- """
-
- # Download model
- if url is None:
- url = f"https://gpt4all.io/models/gguf/{model_filename}"
-
- def make_request(offset=None):
- headers = {}
- if offset:
- print(f"\nDownload interrupted, resuming from byte position {offset}", file=sys.stderr)
- headers["Range"] = f"bytes={offset}-" # resume incomplete response
- headers["Accept-Encoding"] = "identity" # Content-Encoding changes meaning of ranges
- response = requests.get(url, stream=True, headers=headers)
- if response.status_code not in (200, 206):
- raise ValueError(f"Request failed: HTTP {response.status_code} {response.reason}")
- if offset and (response.status_code != 206 or str(offset) not in response.headers.get("Content-Range", "")):
- raise ValueError("Connection was interrupted and server does not support range requests")
- if (enc := response.headers.get("Content-Encoding")) is not None:
- raise ValueError(f"Expected identity Content-Encoding, got {enc}")
- return response
-
- response = make_request()
-
- total_size_in_bytes = int(response.headers.get("content-length", 0))
- block_size = 2**20 # 1 MB
-
- partial_path = Path(model_path) / (model_filename + ".part")
-
- with open(partial_path, "w+b") as partf:
- try:
- with tqdm(desc="Downloading", total=total_size_in_bytes, unit="iB", unit_scale=True) as progress_bar:
- while True:
- last_progress = progress_bar.n
- try:
- for data in response.iter_content(block_size):
- partf.write(data)
- progress_bar.update(len(data))
- except ChunkedEncodingError as cee:
- if cee.args and isinstance(pe := cee.args[0], ProtocolError):
- if len(pe.args) >= 2 and isinstance(ir := pe.args[1], IncompleteRead):
- assert progress_bar.n <= ir.partial # urllib3 may be ahead of us but never behind
- # the socket was closed during a read - retry
- response = make_request(progress_bar.n)
- continue
- raise
- if total_size_in_bytes != 0 and progress_bar.n < total_size_in_bytes:
- if progress_bar.n == last_progress:
- raise RuntimeError("Download not making progress, aborting.")
- # server closed connection prematurely - retry
- response = make_request(progress_bar.n)
- continue
- break
-
- # verify file integrity
- file_size = partf.tell()
- if expected_size is not None and file_size != expected_size:
- raise ValueError(f"Expected file size of {expected_size} bytes, got {file_size}")
- if expected_md5 is not None:
- partf.seek(0)
- hsh = hashlib.md5()
- with tqdm(desc="Verifying", total=file_size, unit="iB", unit_scale=True) as bar:
- while chunk := partf.read(block_size):
- hsh.update(chunk)
- bar.update(len(chunk))
- if hsh.hexdigest() != expected_md5.lower():
- raise ValueError(f"Expected MD5 hash of {expected_md5!r}, got {hsh.hexdigest()!r}")
- except:
- if verbose:
- print("Cleaning up the interrupted download...", file=sys.stderr)
- try:
- os.remove(partial_path)
- except OSError:
- pass
- raise
-
- # flush buffers and sync the inode
- partf.flush()
- _fsync(partf)
-
- # move to final destination
- download_path = Path(model_path) / model_filename
- try:
- os.rename(partial_path, download_path)
- except FileExistsError:
- try:
- os.remove(partial_path)
- except OSError:
- pass
- raise
-
- if verbose:
- print(f"Model downloaded to {str(download_path)!r}", file=sys.stderr)
- return download_path
-
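The retry loop in `download_model` leans on HTTP range requests: when the connection drops, it re-requests `bytes=<offset>-` and keeps appending to the partial file. A stripped-down sketch of that resumption handshake; `fetch_with_resume` is an illustrative name, and the real method above additionally verifies size and MD5 and fsyncs before renaming:

```python
import requests

def fetch_with_resume(url: str, dest: str, block_size: int = 2**20) -> None:
    with open(dest, "w+b") as f:
        while True:
            offset = f.tell()
            headers = {"Accept-Encoding": "identity"}  # ranges apply to raw bytes
            if offset:
                headers["Range"] = f"bytes={offset}-"  # resume where we left off
            resp = requests.get(url, stream=True, headers=headers)
            if not offset:
                resp.raise_for_status()
            elif resp.status_code != 206:
                raise ValueError("server does not support range requests")
            try:
                for chunk in resp.iter_content(block_size):
                    f.write(chunk)
            except requests.exceptions.ChunkedEncodingError:
                continue  # connection dropped mid-read: retry from f.tell()
            break
```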
- @overload
- def generate(
- self, prompt: str, *, max_tokens: int = ..., temp: float = ..., top_k: int = ..., top_p: float = ...,
- min_p: float = ..., repeat_penalty: float = ..., repeat_last_n: int = ..., n_batch: int = ...,
- n_predict: int | None = ..., streaming: Literal[False] = ..., callback: ResponseCallbackType = ...,
- ) -> str: ...
- @overload
- def generate(
- self, prompt: str, *, max_tokens: int = ..., temp: float = ..., top_k: int = ..., top_p: float = ...,
- min_p: float = ..., repeat_penalty: float = ..., repeat_last_n: int = ..., n_batch: int = ...,
- n_predict: int | None = ..., streaming: Literal[True], callback: ResponseCallbackType = ...,
- ) -> Iterable[str]: ...
- @overload
- def generate(
- self, prompt: str, *, max_tokens: int = ..., temp: float = ..., top_k: int = ..., top_p: float = ...,
- min_p: float = ..., repeat_penalty: float = ..., repeat_last_n: int = ..., n_batch: int = ...,
- n_predict: int | None = ..., streaming: bool, callback: ResponseCallbackType = ...,
- ) -> Any: ...
-
- def generate(
- self,
- prompt : str,
- *,
- max_tokens : int = 200,
- temp : float = 0.7,
- top_k : int = 40,
- top_p : float = 0.4,
- min_p : float = 0.0,
- repeat_penalty : float = 1.18,
- repeat_last_n : int = 64,
- n_batch : int = 8,
- n_predict : int | None = None,
- streaming : bool = False,
- callback : ResponseCallbackType = empty_response_callback,
- ) -> Any:
- """
- Generate outputs from any GPT4All model.
-
- Args:
- prompt: The prompt for the model to complete.
- max_tokens: The maximum number of tokens to generate.
- temp: The model temperature. Larger values increase creativity but decrease factuality.
- top_k: Randomly sample from the top_k most likely tokens at each generation step. Set this to 1 for greedy decoding.
- top_p: Randomly sample at each generation step from the top most likely tokens whose probabilities add up to top_p.
- min_p: Randomly sample at each generation step from the top most likely tokens whose probabilities are at least min_p.
- repeat_penalty: Penalize the model for repetition. Higher values result in less repetition.
- repeat_last_n: How far back in the model's generation history to apply the repeat penalty.
- n_batch: Number of prompt tokens processed in parallel. Larger values decrease latency but increase resource requirements.
- n_predict: Equivalent to max_tokens, exists for backwards compatibility.
- streaming: If True, this method will instead return a generator that yields tokens as the model generates them.
- callback: A function with arguments token_id:int and response:str, which receives the tokens from the model as they are generated and stops the generation by returning False.
-
- Returns:
- Either the entire completion or a generator that yields the completion token by token.
- """
-
- # Preparing the model request
- generate_kwargs: dict[str, Any] = dict(
- temp = temp,
- top_k = top_k,
- top_p = top_p,
- min_p = min_p,
- repeat_penalty = repeat_penalty,
- repeat_last_n = repeat_last_n,
- n_batch = n_batch,
- n_predict = n_predict if n_predict is not None else max_tokens,
- )
-
- # Prepare the callback, process the model response
- full_response = ""
-
- def _callback_wrapper(token_id: int, response: str) -> bool:
- nonlocal full_response
- full_response += response
- return callback(token_id, response)
-
- last_msg_rendered = prompt
- if self._chat_session is not None:
- session = self._chat_session
- def render(messages: list[MessageType]) -> str:
- return session.template.render(
- messages=messages,
- add_generation_prompt=True,
- **self.model.special_tokens_map,
- )
- session.history.append(MessageType(role="user", content=prompt))
- prompt = render(session.history)
- if len(session.history) > 1:
- last_msg_rendered = render(session.history[-1:])
-
- # Check request length
- last_msg_len = self.model.count_prompt_tokens(last_msg_rendered)
- if last_msg_len > (limit := self.model.n_ctx - 4):
- raise ValueError(f"Your message was too long and could not be processed ({last_msg_len} > {limit}).")
-
- # Send the request to the model
- if streaming:
- def stream() -> Iterator[str]:
- yield from self.model.prompt_model_streaming(prompt, _callback_wrapper, **generate_kwargs)
- if self._chat_session is not None:
- self._chat_session.history.append(MessageType(role="assistant", content=full_response))
- return stream()
-
- self.model.prompt_model(prompt, _callback_wrapper, **generate_kwargs)
- if self._chat_session is not None:
- self._chat_session.history.append(MessageType(role="assistant", content=full_response))
- return full_response
-
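A usage sketch of `generate` in both blocking and streaming form (model name as used in this repo's tests; downloaded on first use):

```python
from gpt4all import GPT4All

model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")
text = model.generate("Name three colors.", max_tokens=32)  # blocking: full string

for token in model.generate("Name three colors.", max_tokens=32, streaming=True):
    print(token, end="", flush=True)                        # streaming: generator
model.close()
```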
- @contextmanager
- def chat_session(
- self,
- system_message: str | Literal[False] | None = None,
- chat_template: str | None = None,
- ):
- """
- Context manager to hold an inference-optimized chat session with a GPT4All model.
-
- Args:
- system_message: An initial instruction for the model, None to use the model default, or False to disable. Defaults to None.
- chat_template: Jinja template for the conversation, or None to use the model default. Defaults to None.
- """
-
- if system_message is None:
- system_message = self.config.get("systemMessage", False)
-
- if chat_template is None:
- if "name" not in self.config:
- raise ValueError("For sideloaded models or with allow_download=False, you must specify a chat template.")
- if "chatTemplate" not in self.config:
- raise NotImplementedError("This model appears to have a built-in chat template, but loading it is not "
- "currently implemented. Please pass a template to chat_session() directly.")
- if (tmpl := self.config["chatTemplate"]) is None:
- raise ValueError(f"The model {self.config['name']!r} does not support chat.")
- chat_template = tmpl
-
- history = []
- if system_message is not False:
- history.append(MessageType(role="system", content=system_message))
- self._chat_session = ChatSession(
- template=_jinja_env.from_string(chat_template),
- history=history,
- )
- try:
- yield self
- finally:
- self._chat_session = None
-
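And `chat_session` in use; history accumulates in `current_chat_session` and the session is cleared when the context manager exits:

```python
from gpt4all import GPT4All

with GPT4All("orca-mini-3b-gguf2-q4_0.gguf") as model:
    with model.chat_session(system_message="You are a terse assistant."):
        model.generate("hi", max_tokens=16)
        model.generate("and again?", max_tokens=16)  # sees the prior exchange
        print(model.current_chat_session)            # system/user/assistant dicts
```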
- @staticmethod
- def list_gpus() -> list[str]:
- """
- List the names of the available GPU devices.
-
- Returns:
- A list of strings representing the names of the available GPU devices.
- """
- return LLModel.list_gpus()
-
-
-def append_extension_if_missing(model_name):
- if not model_name.endswith((".bin", ".gguf")):
- model_name += ".gguf"
- return model_name
-
-
-class _HasFileno(Protocol):
- def fileno(self) -> int: ...
-
-
-def _fsync(fd: int | _HasFileno) -> None:
- if sys.platform == "darwin":
- # Apple's fsync does not flush the drive write cache
- try:
- fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
- except OSError:
- pass # fall back to fsync
- else:
- return
- os.fsync(fd)
-
-
-def _remove_prefix(s: str, prefix: str) -> str:
- return s[len(prefix):] if s.startswith(prefix) else s
diff --git a/gpt4all-bindings/python/gpt4all/tests/__init__.py b/gpt4all-bindings/python/gpt4all/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py b/gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py
deleted file mode 100755
index 799c035b..00000000
--- a/gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python3
-import sys
-import time
-from io import StringIO
-
-from gpt4all import Embed4All, GPT4All
-
-
-def time_embedding(i, embedder):
- text = 'foo bar ' * i
- start_time = time.time()
- output = embedder.embed(text)
- end_time = time.time()
- elapsed_time = end_time - start_time
- print(f"Time report: {2 * i / elapsed_time} tokens/second with {2 * i} tokens taking {elapsed_time} seconds")
-
-
-if __name__ == "__main__":
- embedder = Embed4All(n_threads=8)
- for i in [2**n for n in range(6, 14)]:
- time_embedding(i, embedder)
diff --git a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py
deleted file mode 100644
index 93df4912..00000000
--- a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import sys
-from io import StringIO
-from pathlib import Path
-
-from gpt4all import GPT4All, Embed4All
-import time
-import pytest
-
-
-def test_inference():
- model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
- output_1 = model.generate('hello', top_k=1)
-
- with model.chat_session():
- response = model.generate(prompt='hello', top_k=1)
- response = model.generate(prompt='write me a short poem', top_k=1)
- response = model.generate(prompt='thank you', top_k=1)
- print(model.current_chat_session)
-
- output_2 = model.generate('hello', top_k=1)
-
- assert output_1 == output_2
-
- tokens = []
- for token in model.generate('hello', streaming=True):
- tokens.append(token)
-
- assert len(tokens) > 0
-
- with model.chat_session():
- model.generate(prompt='hello', top_k=1, streaming=True)
- model.generate(prompt='write me a poem about dogs', top_k=1, streaming=True)
- print(model.current_chat_session)
-
-
-def do_long_input(model):
- long_input = " ".join(["hello how are you"] * 40)
-
- with model.chat_session():
- # llmodel should limit us to 128 even if we ask for more
- model.generate(long_input, n_batch=512)
- print(model.current_chat_session)
-
-
-def test_inference_long_orca_3b():
- model = GPT4All(model_name="orca-mini-3b-gguf2-q4_0.gguf")
- do_long_input(model)
-
-
-def test_inference_long_falcon():
- model = GPT4All(model_name='gpt4all-falcon-q4_0.gguf')
- do_long_input(model)
-
-
-def test_inference_long_llama_7b():
- model = GPT4All(model_name="mistral-7b-openorca.Q4_0.gguf")
- do_long_input(model)
-
-
-def test_inference_long_llama_13b():
- model = GPT4All(model_name='nous-hermes-llama2-13b.Q4_0.gguf')
- do_long_input(model)
-
-
-def test_inference_long_mpt():
- model = GPT4All(model_name='mpt-7b-chat-q4_0.gguf')
- do_long_input(model)
-
-
-def test_inference_long_replit():
- model = GPT4All(model_name='replit-code-v1_5-3b-q4_0.gguf')
- do_long_input(model)
-
-
-def test_inference_hparams():
- model = GPT4All(model_name='orca-mini-3b-gguf2-q4_0.gguf')
-
- output = model.generate("The capital of france is ", max_tokens=3)
- assert 'Paris' in output
-
-
-def test_inference_falcon():
- model = GPT4All(model_name='gpt4all-falcon-q4_0.gguf')
- prompt = 'hello'
- output = model.generate(prompt)
- assert isinstance(output, str)
- assert len(output) > 0
-
-
-def test_inference_mpt():
- model = GPT4All(model_name='mpt-7b-chat-q4_0.gguf')
- prompt = 'hello'
- output = model.generate(prompt)
- assert isinstance(output, str)
- assert len(output) > 0
-
-
-def test_embedding():
- text = 'The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox'
- embedder = Embed4All()
- output = embedder.embed(text)
- #for i, value in enumerate(output):
- #print(f'Value at index {i}: {value}')
- assert len(output) == 384
-
-
-def test_empty_embedding():
- text = ''
- embedder = Embed4All()
- with pytest.raises(ValueError):
- output = embedder.embed(text)
-
-def test_download_model(tmp_path: Path):
- from gpt4all import gpt4all
- old_default_dir = gpt4all.DEFAULT_MODEL_DIRECTORY
- gpt4all.DEFAULT_MODEL_DIRECTORY = tmp_path # temporary pytest directory to ensure a download happens
- try:
- model = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin')
- model_path = tmp_path / model.config['filename']
- assert model_path.absolute() == Path(model.config['path']).absolute()
- assert model_path.stat().st_size == int(model.config['filesize'])
- finally:
- gpt4all.DEFAULT_MODEL_DIRECTORY = old_default_dir
diff --git a/gpt4all-bindings/python/makefile b/gpt4all-bindings/python/makefile
deleted file mode 100644
index 0b3395e5..00000000
--- a/gpt4all-bindings/python/makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-SHELL:=/bin/bash -o pipefail
-ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
-PYTHON:=python3
-
-env:
- if [ ! -d $(ROOT_DIR)/env ]; then $(PYTHON) -m venv $(ROOT_DIR)/env; fi
-
-dev: env
- source env/bin/activate; pip install black isort pytest; pip install -e .
-
-documentation:
- rm -rf ./site && mkdocs build
-
-wheel:
- rm -rf dist/ build/ gpt4all/llmodel_DO_NOT_MODIFY; python setup.py bdist_wheel;
-
-clean:
- rm -rf {.pytest_cache,env,gpt4all.egg-info}
- find . | grep -E "(__pycache__|\.pyc$$|\.pyo$$)" | xargs rm -rf
-
-black:
- source env/bin/activate; black -l 120 -S --target-version py36 gpt4all
-
-isort:
- source env/bin/activate; isort --ignore-whitespace --atomic -w 120 gpt4all
-
-test:
- source env/bin/activate; pytest -s gpt4all/tests -k "not test_inference_long"
-
-test_all:
- source env/bin/activate; pytest -s gpt4all/tests
diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py
deleted file mode 100644
index b316adc0..00000000
--- a/gpt4all-bindings/python/setup.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from setuptools import setup, find_packages
-import os
-import pathlib
-import platform
-import shutil
-
-package_name = "gpt4all"
-
-# Define the location of your prebuilt C library files
-SRC_CLIB_DIRECTORY = os.path.join("..", "..", "gpt4all-backend")
-SRC_CLIB_BUILD_DIRECTORY = os.path.join("..", "..", "gpt4all-backend", "build")
-
-LIB_NAME = "llmodel"
-
-DEST_CLIB_DIRECTORY = os.path.join(package_name, f"{LIB_NAME}_DO_NOT_MODIFY")
-DEST_CLIB_BUILD_DIRECTORY = os.path.join(DEST_CLIB_DIRECTORY, "build")
-
-system = platform.system()
-
-def get_c_shared_lib_extension():
-
- if system == "Darwin":
- return "dylib"
- elif system == "Linux":
- return "so"
- elif system == "Windows":
- return "dll"
- else:
- raise Exception("Operating System not supported")
-
-lib_ext = get_c_shared_lib_extension()
-
-def copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir):
- files_copied = 0
-
- if not os.path.exists(dest_dir):
- os.mkdir(dest_dir)
- os.mkdir(dest_build_dir)
-
- for dirpath, _, filenames in os.walk(src_dir):
- for item in filenames:
- # copy over header files to dest dir
- s = os.path.join(dirpath, item)
- if item.endswith(".h"):
- d = os.path.join(dest_dir, item)
- shutil.copy2(s, d)
- files_copied += 1
- if item.endswith(lib_ext) or item.endswith('.metallib'):
- s = os.path.join(dirpath, item)
- d = os.path.join(dest_build_dir, item)
- shutil.copy2(s, d)
- files_copied += 1
-
- return files_copied
-
-
-# NOTE: You must provide correct path to the prebuilt llmodel C library.
-# Specifically, the llmodel.h and C shared library are needed.
-copy_prebuilt_C_lib(SRC_CLIB_DIRECTORY,
- DEST_CLIB_DIRECTORY,
- DEST_CLIB_BUILD_DIRECTORY)
-
-
-def get_long_description():
- with open(pathlib.Path(__file__).parent / "README.md", encoding="utf-8") as fp:
- return fp.read()
-
-
-setup(
- name=package_name,
- version="2.8.3.dev0",
- description="Python bindings for GPT4All",
- long_description=get_long_description(),
- long_description_content_type="text/markdown",
- author="Nomic and the Open Source Community",
- author_email="support@nomic.ai",
- url="https://www.nomic.ai/gpt4all",
- project_urls={
- "Documentation": "https://docs.gpt4all.io/gpt4all_python.html",
- "Source code": "https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python",
- "Changelog": "https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-bindings/python/CHANGELOG.md",
- },
- classifiers = [
- "Programming Language :: Python :: 3",
- "License :: OSI Approved :: MIT License",
- "Operating System :: OS Independent",
- ],
- python_requires='>=3.8',
- packages=find_packages(),
- install_requires=[
- 'importlib_resources; python_version < "3.9"',
- 'jinja2~=3.1',
- 'requests',
- 'tqdm',
- 'typing-extensions>=4.3.0; python_version >= "3.9" and python_version < "3.11"',
- ],
- extras_require={
- 'cuda': [
- 'nvidia-cuda-runtime-cu11',
- 'nvidia-cublas-cu11',
- ],
- 'all': [
- 'gpt4all[cuda]; platform_system == "Windows" or platform_system == "Linux"',
- ],
- 'dev': [
- 'gpt4all[all]',
- 'pytest',
- 'twine',
- 'wheel',
- 'setuptools',
- 'mkdocs-material',
- 'mkdocs-material[imaging]',
- 'mkautodoc',
- 'mkdocstrings[python]',
- 'mkdocs-jupyter',
- 'black',
- 'isort',
- 'typing-extensions>=3.10',
- ]
- },
- package_data={'llmodel': [os.path.join(DEST_CLIB_DIRECTORY, "*")]},
- include_package_data=True
-)
diff --git a/gpt4all-bindings/typescript/.clang-format b/gpt4all-bindings/typescript/.clang-format
deleted file mode 100644
index 98ba18a1..00000000
--- a/gpt4all-bindings/typescript/.clang-format
+++ /dev/null
@@ -1,4 +0,0 @@
----
-Language: Cpp
-BasedOnStyle: Microsoft
-ColumnLimit: 120
\ No newline at end of file
diff --git a/gpt4all-bindings/typescript/.gitignore b/gpt4all-bindings/typescript/.gitignore
deleted file mode 100644
index 7fd9d3cb..00000000
--- a/gpt4all-bindings/typescript/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-node_modules/
-build/
-prebuilds/
-.yarn/*
-!.yarn/patches
-!.yarn/plugins
-!.yarn/releases
-!.yarn/sdks
-!.yarn/versions
-runtimes/
-compile_flags.txt
diff --git a/gpt4all-bindings/typescript/.npmignore b/gpt4all-bindings/typescript/.npmignore
deleted file mode 100644
index 8921a757..00000000
--- a/gpt4all-bindings/typescript/.npmignore
+++ /dev/null
@@ -1,4 +0,0 @@
-test/
-spec/
-scripts/
-build
\ No newline at end of file
diff --git a/gpt4all-bindings/typescript/.yarnrc.yml b/gpt4all-bindings/typescript/.yarnrc.yml
deleted file mode 100644
index 3186f3f0..00000000
--- a/gpt4all-bindings/typescript/.yarnrc.yml
+++ /dev/null
@@ -1 +0,0 @@
-nodeLinker: node-modules
diff --git a/gpt4all-bindings/typescript/README.md b/gpt4all-bindings/typescript/README.md
deleted file mode 100644
index 384e4afb..00000000
--- a/gpt4all-bindings/typescript/README.md
+++ /dev/null
@@ -1,284 +0,0 @@
-# GPT4All Node.js API
-
-Native Node.js LLM bindings for all.
-
-```sh
-yarn add gpt4all@latest
-
-npm install gpt4all@latest
-
-pnpm install gpt4all@latest
-
-```
-## Breaking changes in version 4!!
-* See [Transition](#changes)
-## Contents
-* See [API Reference](#api-reference)
-* See [Examples](#api-examples)
-* See [Developing](#develop)
-* GPT4All Node.js bindings created by [jacoobes](https://github.com/jacoobes), [limez](https://github.com/iimez) and the [Nomic AI community](https://home.nomic.ai), for all to use.
-* [spare change](https://github.com/sponsors/jacoobes) for a college student? 🤑
-## API Examples
-### Chat Completion
-
-Use a chat session to keep context between completions. This is useful for efficient back-and-forth conversations.
-
-```js
-import { createCompletion, loadModel } from "../src/gpt4all.js";
-
-const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", {
- verbose: true, // logs loaded model configuration
- device: "gpu", // defaults to 'cpu'
- nCtx: 2048, // the maximum sessions context window size.
-});
-
-// initialize a chat session on the model. a model instance can have only one chat session at a time.
-const chat = await model.createChatSession({
- // any completion options set here will be used as default for all completions in this chat session
- temperature: 0.8,
- // a custom systemPrompt can be set here. note that the template depends on the model.
- // if unset, the systemPrompt that comes with the model will be used.
- systemPrompt: "### System:\nYou are an advanced mathematician.\n\n",
-});
-
-// create a completion using a string as input
-const res1 = await createCompletion(chat, "What is 1 + 1?");
-console.debug(res1.choices[0].message);
-
-// multiple messages can be input to the conversation at once.
-// note that if the last message is not of role 'user', an empty message will be returned.
-await createCompletion(chat, [
- {
- role: "user",
- content: "What is 2 + 2?",
- },
- {
- role: "assistant",
- content: "It's 5.",
- },
-]);
-
-const res3 = await createCompletion(chat, "Could you recalculate that?");
-console.debug(res3.choices[0].message);
-
-model.dispose();
-```
-
-### Stateless usage
-You can use the model without a chat session. This is useful for one-off completions.
-
-```js
-import { createCompletion, loadModel } from "../src/gpt4all.js";
-
-const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf");
-
-// createCompletion methods can also be used on the model directly.
-// context is not maintained between completions.
-const res1 = await createCompletion(model, "What is 1 + 1?");
-console.debug(res1.choices[0].message);
-
-// a whole conversation can be input as well.
-// note that if the last message is not of role 'user', an error will be thrown.
-const res2 = await createCompletion(model, [
- {
- role: "user",
- content: "What is 2 + 2?",
- },
- {
- role: "assistant",
- content: "It's 5.",
- },
- {
- role: "user",
- content: "Could you recalculate that?",
- },
-]);
-console.debug(res2.choices[0].message);
-
-```
-
-### Embedding
-
-```js
-import { loadModel, createEmbedding } from '../src/gpt4all.js'
-
-const embedder = await loadModel("nomic-embed-text-v1.5.f16.gguf", { verbose: true, type: 'embedding'})
-
-console.log(createEmbedding(embedder, "Maybe Minecraft was the friends we made along the way"));
-```
-
-### Streaming responses
-```js
-import { loadModel, createCompletionStream } from "../src/gpt4all.js";
-
-const model = await loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf", {
- device: "gpu",
-});
-
-process.stdout.write("Output: ");
-const stream = createCompletionStream(model, "How are you?");
-stream.tokens.on("data", (data) => {
- process.stdout.write(data);
-});
-//wait till stream finishes. We cannot continue until this one is done.
-await stream.result;
-process.stdout.write("\n");
-model.dispose();
-
-```
-
-### Async Generators
-```js
-import { loadModel, createCompletionGenerator } from "../src/gpt4all.js";
-
-const model = await loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf");
-
-process.stdout.write("Output: ");
-const gen = createCompletionGenerator(
- model,
- "Redstone in Minecraft is Turing Complete. Let that sink in. (let it in!)"
-);
-for await (const chunk of gen) {
- process.stdout.write(chunk);
-}
-
-process.stdout.write("\n");
-model.dispose();
-
-```
-### Offline usage
-Do this before going offline:
-```sh
-curl -L https://gpt4all.io/models/models3.json -o ./models3.json
-```
-```js
-import { createCompletion, loadModel } from 'gpt4all'
-
-// make sure you downloaded the models before going offline!
-const model = await loadModel('mistral-7b-openorca.gguf2.Q4_0.gguf', {
- verbose: true,
- device: 'gpu',
- modelConfigFile: "./models3.json"
-});
-
-await createCompletion(model, 'What is 1 + 1?', { verbose: true })
-
-model.dispose();
-```
-
-## Develop
-### Build Instructions
-
-* `binding.gyp` is compile config
-* Tested on Ubuntu. Everything seems to work fine.
-* Tested on Windows. Everything works fine.
-* Sparse testing on macOS.
-* The MinGW script works to build the gpt4all-backend. We left it there just in case. **HOWEVER**, this package works only with MSVC-built DLLs.
-
-### Requirements
-
-* git
-* [node.js >= 18.0.0](https://nodejs.org/en)
-* [yarn](https://yarnpkg.com/)
-* [node-gyp](https://github.com/nodejs/node-gyp)
- * all of its requirements.
-* (unix) gcc version 12
-* (win) msvc version 143
- * Can be obtained with visual studio 2022 build tools
-* python 3
-* On Windows and Linux, building GPT4All requires the complete Vulkan SDK. You may download it from here: https://vulkan.lunarg.com/sdk/home
-* macOS users do not need Vulkan, as GPT4All will use Metal instead.
-
-### Build (from source)
-
-```sh
-git clone https://github.com/nomic-ai/gpt4all.git
-cd gpt4all-bindings/typescript
-```
-
-* The below shell commands assume the current working directory is `typescript`.
-
-* To Build and Rebuild:
-
-```sh
-node scripts/prebuild.js
-```
-* The llama.cpp git submodule for gpt4all may be absent. If this is the case, make sure to run the following in the llama.cpp parent directory:
-
-```sh
-git submodule update --init --recursive
-```
-
-```sh
-yarn build:backend
-```
-This will build platform-dependent dynamic libraries, which will be located in `runtimes/(platform)/native`.
-
-### Test
-
-```sh
-yarn test
-```
-
-### Source Overview
-
-#### src/
-
-* Extra functions to aid developer experience
-* Typings for the native node addon
-* the javascript interface
-
-#### test/
-
-* Simple unit tests for some of the exported functions.
-* More advanced AI testing is not handled.
-
-#### spec/
-
-* Average look and feel of the API
-* Should work assuming a model and libraries are installed locally in the working directory
-
-#### index.cc
-
-* The bridge between Node.js and C. Where the bindings are.
-
-#### prompt.cc
-
-* Handling prompting and inference of models in a threadsafe, asynchronous way.
-
-### Known Issues
-
-* why your model may be spewing bull 💩
- * The downloaded model is broken (just reinstall or download from official site)
-* Your model is hanging after a call to generate tokens.
- * Is `nPast` set too high? This may cause your model to hang (observed 03/16/2024 on Linux Mint and Ubuntu 22.04).
-* Your GPU usage is still high after node.js exits.
- * Make sure to call `model.dispose()`!!!
-
-### Roadmap
-
-This package has been stabilizing over time, but breaking changes may still happen until the API stabilizes. Here's the todo list:
-
-* \[ ] Purely offline: like the GUI, which can run completely offline, the bindings should be able to as well.
-* \[ ] NPM bundle size reduction via optionalDependencies strategy (need help)
- * Should include prebuilds to avoid painful node-gyp errors
-* \[x] createChatSession ( the python equivalent to create\_chat\_session )
-* \[x] generateTokens, the new name for createTokenStream. As of 3.2.0, this is released but not 100% tested. Check spec/generator.mjs!
-* \[x] ~~createTokenStream, an async iterator that streams each token emitted from the model. Planning on following this [example](https://github.com/nodejs/node-addon-examples/tree/main/threadsafe-async-iterator)~~ May not implement unless someone else can complete
-* \[x] prompt models via a threadsafe function in order to have proper non blocking behavior in nodejs
-* \[x] generateTokens is the new name for this^
-* \[x] proper unit testing (integrate with circle ci)
-* \[x] publish to npm under alpha tag `gpt4all@alpha`
-* \[x] have more people test on other platforms (mac tester needed)
-* \[x] switch to new pluggable backend
-
-## Changes
-This repository serves as the new bindings for Node.js users.
-- If you were a user of [these bindings](https://github.com/nomic-ai/gpt4all-ts), they are outdated.
-- Version 4 includes the following breaking changes:
- * `createEmbedding` & `EmbeddingModel.embed()` return an object, `EmbeddingResult`, instead of a Float32Array.
- * Removed deprecated types `ModelType` and `ModelFile`
- * Removed deprecated initiation of model by string path only
-
-
-### API Reference
diff --git a/gpt4all-bindings/typescript/binding.ci.gyp b/gpt4all-bindings/typescript/binding.ci.gyp
deleted file mode 100644
index 5d511155..00000000
--- a/gpt4all-bindings/typescript/binding.ci.gyp
+++ /dev/null
@@ -1,62 +0,0 @@
-{
- "targets": [
- {
- "target_name": "gpt4all", # gpt4all-ts will cause compile error
- "include_dirs": [
- "(llmodel_required_mem(GetInference(), full_model_path.c_str(), nCtx, nGpuLayers)));
-}
-Napi::Value NodeModelWrapper::GetGpuDevices(const Napi::CallbackInfo &info)
-{
- auto env = info.Env();
- int num_devices = 0;
- auto mem_size = llmodel_required_mem(GetInference(), full_model_path.c_str(), nCtx, nGpuLayers);
- llmodel_gpu_device *all_devices = llmodel_available_gpu_devices(mem_size, &num_devices);
- if (all_devices == nullptr)
- {
- Napi::Error::New(env, "Unable to retrieve list of all GPU devices").ThrowAsJavaScriptException();
- return env.Undefined();
- }
- auto js_array = Napi::Array::New(env, num_devices);
- for (int i = 0; i < num_devices; ++i)
- {
- auto gpu_device = all_devices[i];
- /*
- *
- * struct llmodel_gpu_device {
- int index = 0;
- int type = 0; // same as VkPhysicalDeviceType
- size_t heapSize = 0;
- const char * name;
- const char * vendor;
- };
- *
- */
- Napi::Object js_gpu_device = Napi::Object::New(env);
- js_gpu_device["index"] = uint32_t(gpu_device.index);
- js_gpu_device["type"] = uint32_t(gpu_device.type);
- js_gpu_device["heapSize"] = static_cast(gpu_device.heapSize);
- js_gpu_device["name"] = gpu_device.name;
- js_gpu_device["vendor"] = gpu_device.vendor;
-
- js_array[i] = js_gpu_device;
- }
- return js_array;
-}
-
-Napi::Value NodeModelWrapper::GetType(const Napi::CallbackInfo &info)
-{
- if (type.empty())
- {
- return info.Env().Undefined();
- }
- return Napi::String::New(info.Env(), type);
-}
-
-Napi::Value NodeModelWrapper::InitGpuByString(const Napi::CallbackInfo &info)
-{
- auto env = info.Env();
- size_t memory_required = static_cast<size_t>(info[0].As<Napi::Number>().Uint32Value());
-
- std::string gpu_device_identifier = info[1].As<Napi::String>();
-
- size_t converted_value;
- if (memory_required <= std::numeric_limits<size_t>::max())
- {
- converted_value = static_cast(memory_required);
- }
- else
- {
- Napi::Error::New(env, "invalid number for memory size. Exceeded bounds for memory.")
- .ThrowAsJavaScriptException();
- return env.Undefined();
- }
-
- auto result = llmodel_gpu_init_gpu_device_by_string(GetInference(), converted_value, gpu_device_identifier.c_str());
- return Napi::Boolean::New(env, result);
-}
-Napi::Value NodeModelWrapper::HasGpuDevice(const Napi::CallbackInfo &info)
-{
- return Napi::Boolean::New(info.Env(), llmodel_has_gpu_device(GetInference()));
-}
-
-NodeModelWrapper::NodeModelWrapper(const Napi::CallbackInfo &info) : Napi::ObjectWrap<NodeModelWrapper>(info)
-{
- auto env = info.Env();
- auto config_object = info[0].As();
-
- // sets the directory where models (gguf files) are to be searched
- llmodel_set_implementation_search_path(
- config_object.Has("library_path") ? config_object.Get("library_path").As().Utf8Value().c_str()
- : ".");
-
- std::string model_name = config_object.Get("model_name").As<Napi::String>();
- fs::path model_path = config_object.Get("model_path").As<Napi::String>().Utf8Value();
- std::string full_weight_path = (model_path / fs::path(model_name)).string();
-
- name = model_name.empty() ? model_path.filename().string() : model_name;
- full_model_path = full_weight_path;
- nCtx = config_object.Get("nCtx").As<Napi::Number>().Int32Value();
- nGpuLayers = config_object.Get("ngl").As<Napi::Number>().Int32Value();
-
- const char *e;
- inference_ = llmodel_model_create2(full_weight_path.c_str(), "auto", &e);
- if (!inference_)
- {
- Napi::Error::New(env, e).ThrowAsJavaScriptException();
- return;
- }
- if (GetInference() == nullptr)
- {
- std::cerr << "Tried searching libraries in \"" << llmodel_get_implementation_search_path() << "\"" << std::endl;
- std::cerr << "Tried searching for model weight in \"" << full_weight_path << "\"" << std::endl;
- std::cerr << "Do you have runtime libraries installed?" << std::endl;
- Napi::Error::New(env, "Had an issue creating llmodel object, inference is null").ThrowAsJavaScriptException();
- return;
- }
-
- std::string device = config_object.Get("device").As();
- if (device != "cpu")
- {
- size_t mem = llmodel_required_mem(GetInference(), full_weight_path.c_str(), nCtx, nGpuLayers);
-
- auto success = llmodel_gpu_init_gpu_device_by_string(GetInference(), mem, device.c_str());
- if (!success)
- {
- // https://github.com/nomic-ai/gpt4all/blob/3acbef14b7c2436fe033cae9036e695d77461a16/gpt4all-bindings/python/gpt4all/pyllmodel.py#L215
- // Haven't implemented this but it is still open to contribution
- std::cout << "WARNING: Failed to init GPU\n";
- }
- }
-
- auto success = llmodel_loadModel(GetInference(), full_weight_path.c_str(), nCtx, nGpuLayers);
- if (!success)
- {
- Napi::Error::New(env, "Failed to load model at given path").ThrowAsJavaScriptException();
- return;
- }
- // optional
- if (config_object.Has("model_type"))
- {
- type = config_object.Get("model_type").As<Napi::String>();
- }
-};
-
-// NodeModelWrapper::~NodeModelWrapper() {
-// if(GetInference() != nullptr) {
-// std::cout << "Debug: deleting model\n";
-// llmodel_model_destroy(inference_);
-// std::cout << (inference_ == nullptr);
-// }
-// }
-// void NodeModelWrapper::Finalize(Napi::Env env) {
-// if(inference_ != nullptr) {
-// std::cout << "Debug: deleting model\n";
-//
-// }
-// }
-Napi::Value NodeModelWrapper::IsModelLoaded(const Napi::CallbackInfo &info)
-{
- return Napi::Boolean::New(info.Env(), llmodel_isModelLoaded(GetInference()));
-}
-
-Napi::Value NodeModelWrapper::StateSize(const Napi::CallbackInfo &info)
-{
- // Implement the binding for the stateSize method
- return Napi::Number::New(info.Env(), static_cast<int64_t>(llmodel_get_state_size(GetInference())));
-}
-
-Napi::Array ChunkedFloatPtr(float *embedding_ptr, int embedding_size, int text_len, Napi::Env const &env)
-{
- auto n_embd = embedding_size / text_len;
- // std::cout << "Embedding size: " << embedding_size << std::endl;
- // std::cout << "Text length: " << text_len << std::endl;
- // std::cout << "Chunk size (n_embd): " << n_embd << std::endl;
- Napi::Array result = Napi::Array::New(env, text_len);
- auto count = 0;
- for (int i = 0; i < embedding_size; i += n_embd)
- {
- int end = std::min(i + n_embd, embedding_size);
- // possible bounds error?
- // Constructs a container with as many elements as the range [first,last), with each element emplace-constructed
- // from its corresponding element in that range, in the same order.
- std::vector<float> chunk(embedding_ptr + i, embedding_ptr + end);
- Napi::Float32Array fltarr = Napi::Float32Array::New(env, chunk.size());
- // I know there's a way to emplace the raw float ptr into a Napi::Float32Array, but I don't know how, and
- // I'm too scared to cause memory issues;
- // this is good enough
- for (int j = 0; j < chunk.size(); j++)
- {
-
- fltarr.Set(j, chunk[j]);
- }
- result.Set(count++, fltarr);
- }
- return result;
-}
-
-Napi::Value NodeModelWrapper::GenerateEmbedding(const Napi::CallbackInfo &info)
-{
- auto env = info.Env();
-
- auto prefix = info[1];
- auto dimensionality = info[2].As<Napi::Number>().Int32Value();
- auto do_mean = info[3].As<Napi::Boolean>().Value();
- auto atlas = info[4].As<Napi::Boolean>().Value();
- size_t embedding_size;
- size_t token_count = 0;
-
- // This procedure could probably be optimized; it builds several intermediary structures.
- std::vector<std::string> text_arr;
- bool is_single_text = false;
- if (info[0].IsString())
- {
- is_single_text = true;
- text_arr.push_back(info[0].As<Napi::String>().Utf8Value());
- }
- else
- {
- auto jsarr = info[0].As<Napi::Array>();
- size_t len = jsarr.Length();
- text_arr.reserve(len);
- for (size_t i = 0; i < len; ++i)
- {
- std::string str = jsarr.Get(i).As<Napi::String>().Utf8Value();
- text_arr.push_back(str);
- }
- }
- std::vector<const char *> str_ptrs;
- str_ptrs.reserve(text_arr.size() + 1);
- for (size_t i = 0; i < text_arr.size(); ++i)
- str_ptrs.push_back(text_arr[i].c_str());
- str_ptrs.push_back(nullptr);
- const char *_err = nullptr;
- float *embeds = llmodel_embed(GetInference(), str_ptrs.data(), &embedding_size,
- prefix.IsUndefined() ? nullptr : prefix.As<Napi::String>().Utf8Value().c_str(),
- dimensionality, &token_count, do_mean, atlas, nullptr, &_err);
- if (!embeds)
- {
- // copy into a std::string so we don't have to deal with raw C strings
- std::string err(_err);
- Napi::Error::New(env, err == "(unknown error)" ? "Unknown error" : err).ThrowAsJavaScriptException();
- return env.Undefined();
- }
- auto embedmat = ChunkedFloatPtr(embeds, embedding_size, text_arr.size(), env);
-
- llmodel_free_embedding(embeds);
- auto res = Napi::Object::New(env);
- res.Set("n_prompt_tokens", token_count);
- if(is_single_text) {
- res.Set("embeddings", embedmat.Get(static_cast(0)));
- } else {
- res.Set("embeddings", embedmat);
- }
-
- return res;
-}
-
-/**
- * Generate a response using the model.
- * @param prompt A string representing the input prompt.
- * @param options Inference options.
- */
-Napi::Value NodeModelWrapper::Infer(const Napi::CallbackInfo &info)
-{
- auto env = info.Env();
- std::string prompt;
- if (info[0].IsString())
- {
- prompt = info[0].As<Napi::String>().Utf8Value();
- }
- else
- {
- Napi::Error::New(info.Env(), "invalid string argument").ThrowAsJavaScriptException();
- return info.Env().Undefined();
- }
-
- if (!info[1].IsObject())
- {
- Napi::Error::New(info.Env(), "Missing Prompt Options").ThrowAsJavaScriptException();
- return info.Env().Undefined();
- }
- // defaults copied from python bindings
- llmodel_prompt_context promptContext = {.logits = nullptr,
- .tokens = nullptr,
- .n_past = 0,
- .n_ctx = nCtx,
- .n_predict = 4096,
- .top_k = 40,
- .top_p = 0.9f,
- .min_p = 0.0f,
- .temp = 0.1f,
- .n_batch = 8,
- .repeat_penalty = 1.2f,
- .repeat_last_n = 10,
- .context_erase = 0.75};
-
- PromptWorkerConfig promptWorkerConfig;
-
- auto inputObject = info[1].As<Napi::Object>();
-
- if (inputObject.Has("logits") || inputObject.Has("tokens"))
- {
- Napi::Error::New(info.Env(), "Invalid input: 'logits' or 'tokens' properties are not allowed")
- .ThrowAsJavaScriptException();
- return info.Env().Undefined();
- }
-
- // Assign the remaining properties
- if (inputObject.Has("nPast") && inputObject.Get("nPast").IsNumber())
- {
- promptContext.n_past = inputObject.Get("nPast").As().Int32Value();
- }
- if (inputObject.Has("nPredict") && inputObject.Get("nPredict").IsNumber())
- {
- promptContext.n_predict = inputObject.Get("nPredict").As().Int32Value();
- }
- if (inputObject.Has("topK") && inputObject.Get("topK").IsNumber())
- {
- promptContext.top_k = inputObject.Get("topK").As().Int32Value();
- }
- if (inputObject.Has("topP") && inputObject.Get("topP").IsNumber())
- {
- promptContext.top_p = inputObject.Get("topP").As().FloatValue();
- }
- if (inputObject.Has("minP") && inputObject.Get("minP").IsNumber())
- {
- promptContext.min_p = inputObject.Get("minP").As().FloatValue();
- }
- if (inputObject.Has("temp") && inputObject.Get("temp").IsNumber())
- {
- promptContext.temp = inputObject.Get("temp").As().FloatValue();
- }
- if (inputObject.Has("nBatch") && inputObject.Get("nBatch").IsNumber())
- {
- promptContext.n_batch = inputObject.Get("nBatch").As().Int32Value();
- }
- if (inputObject.Has("repeatPenalty") && inputObject.Get("repeatPenalty").IsNumber())
- {
- promptContext.repeat_penalty = inputObject.Get("repeatPenalty").As().FloatValue();
- }
- if (inputObject.Has("repeatLastN") && inputObject.Get("repeatLastN").IsNumber())
- {
- promptContext.repeat_last_n = inputObject.Get("repeatLastN").As().Int32Value();
- }
- if (inputObject.Has("contextErase") && inputObject.Get("contextErase").IsNumber())
- {
- promptContext.context_erase = inputObject.Get("contextErase").As().FloatValue();
- }
- if (inputObject.Has("onPromptToken") && inputObject.Get("onPromptToken").IsFunction())
- {
- promptWorkerConfig.promptCallback = inputObject.Get("onPromptToken").As<Napi::Function>();
- promptWorkerConfig.hasPromptCallback = true;
- }
- if (inputObject.Has("onResponseToken") && inputObject.Get("onResponseToken").IsFunction())
- {
- promptWorkerConfig.responseCallback = inputObject.Get("onResponseToken").As<Napi::Function>();
- promptWorkerConfig.hasResponseCallback = true;
- }
-
- // copy to protect llmodel resources when splitting to new thread
- // llmodel_prompt_context copiedPrompt = promptContext;
- promptWorkerConfig.context = promptContext;
- promptWorkerConfig.model = GetInference();
- promptWorkerConfig.mutex = &inference_mutex;
- promptWorkerConfig.prompt = prompt;
- promptWorkerConfig.result = "";
-
- promptWorkerConfig.promptTemplate = inputObject.Get("promptTemplate").As<Napi::String>();
- if (inputObject.Has("special"))
- {
- promptWorkerConfig.special = inputObject.Get("special").As<Napi::Boolean>();
- }
- if (inputObject.Has("fakeReply"))
- {
- // this will be deleted in the worker
- promptWorkerConfig.fakeReply = new std::string(inputObject.Get("fakeReply").As<Napi::String>().Utf8Value());
- }
- auto worker = new PromptWorker(env, promptWorkerConfig);
-
- worker->Queue();
-
- return worker->GetPromise();
-}
-void NodeModelWrapper::Dispose(const Napi::CallbackInfo &info)
-{
- llmodel_model_destroy(inference_);
-}
-void NodeModelWrapper::SetThreadCount(const Napi::CallbackInfo &info)
-{
- if (info[0].IsNumber())
- {
- llmodel_setThreadCount(GetInference(), info[0].As<Napi::Number>().Int64Value());
- }
- else
- {
- Napi::Error::New(info.Env(), "Could not set thread count: argument 1 is NaN").ThrowAsJavaScriptException();
- return;
- }
-}
-
-Napi::Value NodeModelWrapper::GetName(const Napi::CallbackInfo &info)
-{
- return Napi::String::New(info.Env(), name);
-}
-Napi::Value NodeModelWrapper::ThreadCount(const Napi::CallbackInfo &info)
-{
- return Napi::Number::New(info.Env(), llmodel_threadCount(GetInference()));
-}
-
-Napi::Value NodeModelWrapper::GetLibraryPath(const Napi::CallbackInfo &info)
-{
- return Napi::String::New(info.Env(), llmodel_get_implementation_search_path());
-}
-
-llmodel_model NodeModelWrapper::GetInference()
-{
- return inference_;
-}
-
-// Exports Bindings
-Napi::Object Init(Napi::Env env, Napi::Object exports)
-{
- exports["LLModel"] = NodeModelWrapper::GetClass(env);
- return exports;
-}
-
-NODE_API_MODULE(NODE_GYP_MODULE_NAME, Init)
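For context on what this file did: `NodeModelWrapper::Infer` mapped a camelCase JS options object field-by-field onto `llmodel_prompt_context`. A minimal sketch of driving it through the deleted package's high-level API — the option names are taken from the parsing code above, and the model file name is illustrative:

```js
import { loadModel, createCompletion } from "gpt4all";

const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf");

// Each option corresponds to a branch in NodeModelWrapper::Infer:
// nPredict -> n_predict, topK -> top_k, topP -> top_p, minP -> min_p,
// temp -> temp, repeatPenalty -> repeat_penalty, repeatLastN -> repeat_last_n.
const res = await createCompletion(model, "What is 1 + 1?", {
    nPredict: 64,
    topK: 40,
    topP: 0.9,
    minP: 0.0,
    temp: 0.1,
    repeatPenalty: 1.2,
});
console.log(res.choices[0].message.content);
```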
diff --git a/gpt4all-bindings/typescript/index.h b/gpt4all-bindings/typescript/index.h
deleted file mode 100644
index db3ef11e..00000000
--- a/gpt4all-bindings/typescript/index.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#include "llmodel.h"
-#include "llmodel_c.h"
-#include "prompt.h"
-#include <atomic>
-#include <filesystem>
-#include <iostream>
-#include <memory>
-#include <mutex>
-#include <napi.h>
-#include <set>
-
-namespace fs = std::filesystem;
-
-class NodeModelWrapper : public Napi::ObjectWrap<NodeModelWrapper>
-{
-
- public:
- NodeModelWrapper(const Napi::CallbackInfo &);
- // virtual ~NodeModelWrapper();
- Napi::Value GetType(const Napi::CallbackInfo &info);
- Napi::Value IsModelLoaded(const Napi::CallbackInfo &info);
- Napi::Value StateSize(const Napi::CallbackInfo &info);
- // void Finalize(Napi::Env env) override;
- /**
- * Prompting the model. This entails spawning a new thread and adding the response tokens
- * into a thread local string variable.
- */
- Napi::Value Infer(const Napi::CallbackInfo &info);
- void SetThreadCount(const Napi::CallbackInfo &info);
- void Dispose(const Napi::CallbackInfo &info);
- Napi::Value GetName(const Napi::CallbackInfo &info);
- Napi::Value ThreadCount(const Napi::CallbackInfo &info);
- Napi::Value GenerateEmbedding(const Napi::CallbackInfo &info);
- Napi::Value HasGpuDevice(const Napi::CallbackInfo &info);
- Napi::Value ListGpus(const Napi::CallbackInfo &info);
- Napi::Value InitGpuByString(const Napi::CallbackInfo &info);
- Napi::Value GetRequiredMemory(const Napi::CallbackInfo &info);
- Napi::Value GetGpuDevices(const Napi::CallbackInfo &info);
- /*
- * The path that is used to search for the dynamic libraries
- */
- Napi::Value GetLibraryPath(const Napi::CallbackInfo &info);
- /**
- * Creates the LLModel class
- */
- static Napi::Function GetClass(Napi::Env);
- llmodel_model GetInference();
-
- private:
- /**
- * The underlying inference that interfaces with the C interface
- */
- llmodel_model inference_;
-
- std::mutex inference_mutex;
-
- std::string type;
- // corresponds to LLModel::name() in typescript
- std::string name;
- int nCtx{};
- int nGpuLayers{};
- std::string full_model_path;
-};
diff --git a/gpt4all-bindings/typescript/package.json b/gpt4all-bindings/typescript/package.json
deleted file mode 100644
index 7f3e368e..00000000
--- a/gpt4all-bindings/typescript/package.json
+++ /dev/null
@@ -1,53 +0,0 @@
-{
- "name": "gpt4all",
- "version": "4.0.0",
- "packageManager": "yarn@3.6.1",
- "main": "src/gpt4all.js",
- "repository": "nomic-ai/gpt4all",
- "scripts": {
- "install": "node-gyp-build",
- "test": "jest",
- "build:backend": "node scripts/build.js",
- "build": "node-gyp-build",
- "docs:build": "node scripts/docs.js && documentation readme ./src/gpt4all.d.ts --parse-extension js d.ts --format md --section \"API Reference\" --readme-file ../python/docs/gpt4all_nodejs.md"
- },
- "files": [
- "src/**/*",
- "runtimes/**/*",
- "binding.gyp",
- "prebuilds/**/*",
- "*.h",
- "*.cc",
- "gpt4all-backend/**/*"
- ],
- "dependencies": {
- "md5-file": "^5.0.0",
- "node-addon-api": "^6.1.0",
- "node-gyp-build": "^4.6.0"
- },
- "devDependencies": {
- "@types/node": "^20.1.5",
- "documentation": "^14.0.2",
- "jest": "^29.5.0",
- "prebuildify": "^5.0.1",
- "prettier": "^2.8.8"
- },
- "optionalDependencies": {
- "node-gyp": "9.x.x"
- },
- "engines": {
- "node": ">= 18.x.x"
- },
- "prettier": {
- "endOfLine": "lf",
- "tabWidth": 4
- },
- "jest": {
- "verbose": true
- },
- "publishConfig": {
- "registry": "https://registry.npmjs.org/",
- "access": "public",
- "tag": "latest"
- }
-}
diff --git a/gpt4all-bindings/typescript/prompt.cc b/gpt4all-bindings/typescript/prompt.cc
deleted file mode 100644
index d24a7e90..00000000
--- a/gpt4all-bindings/typescript/prompt.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-#include "prompt.h"
-#include <future>
-
-PromptWorker::PromptWorker(Napi::Env env, PromptWorkerConfig config)
- : promise(Napi::Promise::Deferred::New(env)), _config(config), AsyncWorker(env)
-{
- if (_config.hasResponseCallback)
- {
- _responseCallbackFn = Napi::ThreadSafeFunction::New(config.responseCallback.Env(), config.responseCallback,
- "PromptWorker", 0, 1, this);
- }
-
- if (_config.hasPromptCallback)
- {
- _promptCallbackFn = Napi::ThreadSafeFunction::New(config.promptCallback.Env(), config.promptCallback,
- "PromptWorker", 0, 1, this);
- }
-}
-
-PromptWorker::~PromptWorker()
-{
- if (_config.hasResponseCallback)
- {
- _responseCallbackFn.Release();
- }
- if (_config.hasPromptCallback)
- {
- _promptCallbackFn.Release();
- }
-}
-
-void PromptWorker::Execute()
-{
- _config.mutex->lock();
-
- LLModelWrapper *wrapper = reinterpret_cast<LLModelWrapper *>(_config.model);
-
- auto ctx = &_config.context;
-
- if (size_t(ctx->n_past) < wrapper->promptContext.tokens.size())
- wrapper->promptContext.tokens.resize(ctx->n_past);
-
- // Copy the C prompt context
- wrapper->promptContext.n_past = ctx->n_past;
- wrapper->promptContext.n_ctx = ctx->n_ctx;
- wrapper->promptContext.n_predict = ctx->n_predict;
- wrapper->promptContext.top_k = ctx->top_k;
- wrapper->promptContext.top_p = ctx->top_p;
- wrapper->promptContext.temp = ctx->temp;
- wrapper->promptContext.n_batch = ctx->n_batch;
- wrapper->promptContext.repeat_penalty = ctx->repeat_penalty;
- wrapper->promptContext.repeat_last_n = ctx->repeat_last_n;
- wrapper->promptContext.contextErase = ctx->context_erase;
-
- // Call the C++ prompt method
-
- wrapper->llModel->prompt(
- _config.prompt, _config.promptTemplate, [this](int32_t token_id) { return PromptCallback(token_id); },
- [this](int32_t token_id, const std::string token) { return ResponseCallback(token_id, token); },
- [](bool isRecalculating) { return isRecalculating; }, wrapper->promptContext, _config.special,
- _config.fakeReply);
-
- // Update the C context by giving access to the wrappers raw pointers to std::vector data
- // which involves no copies
- ctx->logits = wrapper->promptContext.logits.data();
- ctx->logits_size = wrapper->promptContext.logits.size();
- ctx->tokens = wrapper->promptContext.tokens.data();
- ctx->tokens_size = wrapper->promptContext.tokens.size();
-
- // Update the rest of the C prompt context
- ctx->n_past = wrapper->promptContext.n_past;
- ctx->n_ctx = wrapper->promptContext.n_ctx;
- ctx->n_predict = wrapper->promptContext.n_predict;
- ctx->top_k = wrapper->promptContext.top_k;
- ctx->top_p = wrapper->promptContext.top_p;
- ctx->temp = wrapper->promptContext.temp;
- ctx->n_batch = wrapper->promptContext.n_batch;
- ctx->repeat_penalty = wrapper->promptContext.repeat_penalty;
- ctx->repeat_last_n = wrapper->promptContext.repeat_last_n;
- ctx->context_erase = wrapper->promptContext.contextErase;
-
- _config.mutex->unlock();
-}
-
-void PromptWorker::OnOK()
-{
- Napi::Object returnValue = Napi::Object::New(Env());
- returnValue.Set("text", result);
- returnValue.Set("nPast", _config.context.n_past);
- promise.Resolve(returnValue);
- delete _config.fakeReply;
-}
-
-void PromptWorker::OnError(const Napi::Error &e)
-{
- delete _config.fakeReply;
- promise.Reject(e.Value());
-}
-
-Napi::Promise PromptWorker::GetPromise()
-{
- return promise.Promise();
-}
-
-bool PromptWorker::ResponseCallback(int32_t token_id, const std::string token)
-{
- if (token_id == -1)
- {
- return false;
- }
-
- if (!_config.hasResponseCallback)
- {
- return true;
- }
-
- result += token;
-
- std::promise promise;
-
- auto info = new ResponseCallbackData();
- info->tokenId = token_id;
- info->token = token;
-
- auto future = promise.get_future();
-
- auto status = _responseCallbackFn.BlockingCall(
- info, [&promise](Napi::Env env, Napi::Function jsCallback, ResponseCallbackData *value) {
- try
- {
- // Transform native data into JS data, passing it to the provided
- // `jsCallback` -- the TSFN's JavaScript function.
- auto token_id = Napi::Number::New(env, value->tokenId);
- auto token = Napi::String::New(env, value->token);
- auto jsResult = jsCallback.Call({token_id, token}).ToBoolean();
- promise.set_value(jsResult);
- }
- catch (const Napi::Error &e)
- {
- std::cerr << "Error in onResponseToken callback: " << e.what() << std::endl;
- promise.set_value(false);
- }
-
- delete value;
- });
- if (status != napi_ok)
- {
- Napi::Error::Fatal("PromptWorkerResponseCallback", "Napi::ThreadSafeNapi::Function.NonBlockingCall() failed");
- }
-
- return future.get();
-}
-
-bool PromptWorker::RecalculateCallback(bool isRecalculating)
-{
- return isRecalculating;
-}
-
-bool PromptWorker::PromptCallback(int32_t token_id)
-{
- if (!_config.hasPromptCallback)
- {
- return true;
- }
-
- std::promise promise;
-
- auto info = new PromptCallbackData();
- info->tokenId = token_id;
-
- auto future = promise.get_future();
-
- auto status = _promptCallbackFn.BlockingCall(
- info, [&promise](Napi::Env env, Napi::Function jsCallback, PromptCallbackData *value) {
- try
- {
- // Transform native data into JS data, passing it to the provided
- // `jsCallback` -- the TSFN's JavaScript function.
- auto token_id = Napi::Number::New(env, value->tokenId);
- auto jsResult = jsCallback.Call({token_id}).ToBoolean();
- promise.set_value(jsResult);
- }
- catch (const Napi::Error &e)
- {
- std::cerr << "Error in onPromptToken callback: " << e.what() << std::endl;
- promise.set_value(false);
- }
- delete value;
- });
- if (status != napi_ok)
- {
- Napi::Error::Fatal("PromptWorkerPromptCallback", "Napi::ThreadSafeNapi::Function.NonBlockingCall() failed");
- }
-
- return future.get();
-}
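The `ThreadSafeFunction` plumbing above is what lets a JS callback's return value steer the native generation loop: `ResponseCallback` blocks the inference thread on a `std::future` until the JS callback resolves, and a `false` return (or a thrown error) stops generation. A sketch of exercising that from the JS side, assuming the deleted package's API:

```js
import { loadModel, createCompletion } from "gpt4all";

const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf");

// PromptWorker::ResponseCallback forwards every generated token through the
// thread-safe function and waits for this callback's boolean result.
let tokens = 0;
const res = await createCompletion(model, "Tell me a very long story.", {
    onResponseToken: (tokenId, token) => {
        process.stdout.write(token);
        return ++tokens < 50; // returning false stops generation
    },
});
```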
diff --git a/gpt4all-bindings/typescript/prompt.h b/gpt4all-bindings/typescript/prompt.h
deleted file mode 100644
index 49c43620..00000000
--- a/gpt4all-bindings/typescript/prompt.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef PREDICT_WORKER_H
-#define PREDICT_WORKER_H
-
-#include "llmodel.h"
-#include "llmodel_c.h"
-#include "napi.h"
-#include <cstdint>
-#include <iostream>
-#include <memory>
-#include <mutex>
-#include <string>
-
-struct ResponseCallbackData
-{
- int32_t tokenId;
- std::string token;
-};
-
-struct PromptCallbackData
-{
- int32_t tokenId;
-};
-
-struct LLModelWrapper
-{
- LLModel *llModel = nullptr;
- LLModel::PromptContext promptContext;
- ~LLModelWrapper()
- {
- delete llModel;
- }
-};
-
-struct PromptWorkerConfig
-{
- Napi::Function responseCallback;
- bool hasResponseCallback = false;
- Napi::Function promptCallback;
- bool hasPromptCallback = false;
- llmodel_model model;
- std::mutex *mutex;
- std::string prompt;
- std::string promptTemplate;
- llmodel_prompt_context context;
- std::string result;
- bool special = false;
- std::string *fakeReply = nullptr;
-};
-
-class PromptWorker : public Napi::AsyncWorker
-{
- public:
- PromptWorker(Napi::Env env, PromptWorkerConfig config);
- ~PromptWorker();
- void Execute() override;
- void OnOK() override;
- void OnError(const Napi::Error &e) override;
- Napi::Promise GetPromise();
-
- bool ResponseCallback(int32_t token_id, const std::string token);
- bool RecalculateCallback(bool isrecalculating);
- bool PromptCallback(int32_t token_id);
-
- private:
- Napi::Promise::Deferred promise;
- std::string result;
- PromptWorkerConfig _config;
- Napi::ThreadSafeFunction _responseCallbackFn;
- Napi::ThreadSafeFunction _promptCallbackFn;
-};
-
-#endif // PREDICT_WORKER_H
diff --git a/gpt4all-bindings/typescript/scripts/build.js b/gpt4all-bindings/typescript/scripts/build.js
deleted file mode 100644
index e89550f8..00000000
--- a/gpt4all-bindings/typescript/scripts/build.js
+++ /dev/null
@@ -1,17 +0,0 @@
-const { spawn } = require("node:child_process");
-const { resolve } = require("path");
-const args = process.argv.slice(2);
-const platform = process.platform;
-// Windows, 64-bit or 32-bit
-if (platform === "win32") {
- const path = "scripts/build_msvc.bat";
- spawn(resolve(path), ["/Y", ...args], { shell: true, stdio: "inherit" });
- process.on("data", (s) => console.log(s.toString()));
-} else if (platform === "linux" || platform === "darwin") {
- const path = "scripts/build_unix.sh";
- spawn("sh", [path, ...args], {
- shell: true,
- stdio: "inherit",
- });
- process.on("data", (s) => console.log(s.toString()));
-}
diff --git a/gpt4all-bindings/typescript/scripts/build_mingw.ps1 b/gpt4all-bindings/typescript/scripts/build_mingw.ps1
deleted file mode 100644
index a94d44fd..00000000
--- a/gpt4all-bindings/typescript/scripts/build_mingw.ps1
+++ /dev/null
@@ -1,16 +0,0 @@
-$ROOT_DIR = '.\runtimes\win-x64'
-$BUILD_DIR = '.\runtimes\win-x64\build\mingw'
-$LIBS_DIR = '.\runtimes\win-x64\native'
-
-# cleanup env
-Remove-Item -Force -Recurse $ROOT_DIR -ErrorAction SilentlyContinue | Out-Null
-mkdir $BUILD_DIR | Out-Null
-mkdir $LIBS_DIR | Out-Null
-
-# build
-cmake -G "MinGW Makefiles" -S ..\..\gpt4all-backend -B $BUILD_DIR -DLLAMA_AVX2=ON
-cmake --build $BUILD_DIR --parallel --config Release
-
-# copy native dlls
-# cp "C:\ProgramData\mingw64\mingw64\bin\*dll" $LIBS_DIR
-cp "$BUILD_DIR\bin\*.dll" $LIBS_DIR
diff --git a/gpt4all-bindings/typescript/scripts/build_msvc.bat b/gpt4all-bindings/typescript/scripts/build_msvc.bat
deleted file mode 100644
index 18b215d1..00000000
--- a/gpt4all-bindings/typescript/scripts/build_msvc.bat
+++ /dev/null
@@ -1,33 +0,0 @@
-@echo off
-
-set "BUILD_TYPE=Release"
-set "BUILD_DIR=.\build\win-x64-msvc"
-set "LIBS_DIR=.\runtimes\win32-x64"
-
-REM Cleanup env
-rmdir /s /q %BUILD_DIR%
-
-REM Create directories
-mkdir %BUILD_DIR%
-mkdir %LIBS_DIR%
-
-REM Build
-cmake -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=%BUILD_TYPE% -S ..\..\gpt4all-backend -B %BUILD_DIR% -A x64
-
-:BUILD
-REM Build the project
-cmake --build "%BUILD_DIR%" --parallel --config %BUILD_TYPE%
-
-REM Check the exit code of the build command
-if %errorlevel% neq 0 (
- echo Build failed. Retrying...
- goto BUILD
-)
-
-mkdir runtimes\win32-x64
-
-REM Copy the DLLs to the desired location
-del /F /A /Q %LIBS_DIR%
-xcopy /Y "%BUILD_DIR%\bin\%BUILD_TYPE%\*.dll" runtimes\win32-x64\native\
-
-echo Batch script execution completed.
diff --git a/gpt4all-bindings/typescript/scripts/build_unix.sh b/gpt4all-bindings/typescript/scripts/build_unix.sh
deleted file mode 100755
index d60343c9..00000000
--- a/gpt4all-bindings/typescript/scripts/build_unix.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-
-SYSNAME=$(uname -s)
-
-if [ "$SYSNAME" = "Linux" ]; then
- BASE_DIR="runtimes/linux-x64"
- LIB_EXT="so"
-elif [ "$SYSNAME" = "Darwin" ]; then
- BASE_DIR="runtimes/osx"
- LIB_EXT="dylib"
-elif [ -n "$SYSNAME" ]; then
- echo "Unsupported system: $SYSNAME" >&2
- exit 1
-else
- echo "\"uname -s\" failed" >&2
- exit 1
-fi
-
-NATIVE_DIR="$BASE_DIR/native"
-BUILD_DIR="$BASE_DIR/build"
-
-rm -rf "$BASE_DIR"
-mkdir -p "$NATIVE_DIR" "$BUILD_DIR"
-
-cmake -S ../../gpt4all-backend -B "$BUILD_DIR" &&
-cmake --build "$BUILD_DIR" -j --config Release && {
- cp "$BUILD_DIR"/libgptj*.$LIB_EXT "$NATIVE_DIR"/
- cp "$BUILD_DIR"/libllama*.$LIB_EXT "$NATIVE_DIR"/
-}
diff --git a/gpt4all-bindings/typescript/scripts/docs.js b/gpt4all-bindings/typescript/scripts/docs.js
deleted file mode 100644
index e68495ef..00000000
--- a/gpt4all-bindings/typescript/scripts/docs.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Maybe some command-line piping would work better, but there's no obvious platform-independent command-line tool for this.
-
-const fs = require('fs');
-
-const newPath = '../python/docs/gpt4all_nodejs.md';
-const filepath = './README.md';
-const intro = fs.readFileSync(filepath);
-
-fs.writeFileSync(
- newPath, intro
-);
-
diff --git a/gpt4all-bindings/typescript/scripts/mkclangd.js b/gpt4all-bindings/typescript/scripts/mkclangd.js
deleted file mode 100644
index 20494490..00000000
--- a/gpt4all-bindings/typescript/scripts/mkclangd.js
+++ /dev/null
@@ -1,43 +0,0 @@
-/// makes compile_flags.txt for clangd server support with this project
-/// run this with typescript as your cwd
-//
-// For Debian users: make sure libstdc++-12-dev is installed.
-
-const nodeaddonapi=require('node-addon-api').include;
-
-const fsp = require('fs/promises');
-const { existsSync, readFileSync } = require('fs');
-const assert = require('node:assert');
-const findnodeapih = () => {
- assert(existsSync("./build"), "Haven't built the application once yet. run node scripts/prebuild.js");
- const dir = readFileSync("./build/config.gypi", 'utf8');
- const nodedir_line = dir.match(/"nodedir": "([^"]+)"/);
- assert(nodedir_line, "Found no matches")
- assert(nodedir_line[1]);
- console.log("node_api.h found at: ", nodedir_line[1]);
- return nodedir_line[1]+"/include/node";
-};
-
-const knownIncludes = [
- '-I',
- './',
- '-I',
- nodeaddonapi.substring(1, nodeaddonapi.length-1),
- '-I',
- '../../gpt4all-backend',
- '-I',
- findnodeapih()
-];
-const knownFlags = [
- "-x",
- "c++",
- '-std=c++17'
-];
-
-
-const output = knownFlags.join('\n')+'\n'+knownIncludes.join('\n');
-
-fsp.writeFile('./compile_flags.txt', output, 'utf8')
- .then(() => console.log('done'))
- .catch(() => console.error('failed'));
-
diff --git a/gpt4all-bindings/typescript/scripts/prebuild.js b/gpt4all-bindings/typescript/scripts/prebuild.js
deleted file mode 100644
index 0ea196f7..00000000
--- a/gpt4all-bindings/typescript/scripts/prebuild.js
+++ /dev/null
@@ -1,58 +0,0 @@
-const prebuildify = require("prebuildify");
-
-async function createPrebuilds(combinations) {
- for (const { platform, arch } of combinations) {
- const opts = {
- platform,
- arch,
- napi: true,
- targets: ["18.16.0"]
- };
- try {
- await createPrebuild(opts);
- console.log(
- `Build succeeded for platform ${opts.platform} and architecture ${opts.arch}`
- );
- } catch (err) {
- console.error(
- `Error building for platform ${opts.platform} and architecture ${opts.arch}:`,
- err
- );
- }
- }
-}
-
-function createPrebuild(opts) {
- return new Promise((resolve, reject) => {
- prebuildify(opts, (err) => {
- if (err) {
- reject(err);
- } else {
- resolve();
- }
- });
- });
-}
-
-let prebuildConfigs;
-if(process.platform === 'win32') {
- prebuildConfigs = [
- { platform: "win32", arch: "x64" }
- ];
-} else if(process.platform === 'linux') {
- //Unsure if darwin works, need mac tester!
- prebuildConfigs = [
- { platform: "linux", arch: "x64" },
- //{ platform: "linux", arch: "arm64" },
- //{ platform: "linux", arch: "armv7" },
- ]
-} else if(process.platform === 'darwin') {
- prebuildConfigs = [
- { platform: "darwin", arch: "x64" },
- { platform: "darwin", arch: "arm64" },
- ]
-}
-
-createPrebuilds(prebuildConfigs)
- .then(() => console.log("All builds succeeded"))
- .catch((err) => console.error("Error building:", err));
diff --git a/gpt4all-bindings/typescript/spec/callbacks.mjs b/gpt4all-bindings/typescript/spec/callbacks.mjs
deleted file mode 100644
index 461f32be..00000000
--- a/gpt4all-bindings/typescript/spec/callbacks.mjs
+++ /dev/null
@@ -1,31 +0,0 @@
-import { promises as fs } from "node:fs";
-import { loadModel, createCompletion } from "../src/gpt4all.js";
-
-const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", {
- verbose: true,
- device: "gpu",
-});
-
-const res = await createCompletion(
- model,
- "I've got three 🍣 - What shall I name them?",
- {
- onPromptToken: (tokenId) => {
- console.debug("onPromptToken", { tokenId });
- // throwing an error will cancel
- throw new Error("This is an error");
- // const foo = thisMethodDoesNotExist();
- // returning false will cancel as well
- // return false;
- },
- onResponseToken: (tokenId, token) => {
- console.debug("onResponseToken", { tokenId, token });
- // same applies here
- },
- }
-);
-
-console.debug("Output:", {
- usage: res.usage,
- message: res.choices[0].message,
-});
diff --git a/gpt4all-bindings/typescript/spec/chat-memory.mjs b/gpt4all-bindings/typescript/spec/chat-memory.mjs
deleted file mode 100644
index 9a771633..00000000
--- a/gpt4all-bindings/typescript/spec/chat-memory.mjs
+++ /dev/null
@@ -1,65 +0,0 @@
-import { loadModel, createCompletion } from "../src/gpt4all.js";
-
-const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", {
- verbose: true,
- device: "gpu",
-});
-
-const chat = await model.createChatSession({
- messages: [
- {
- role: "user",
- content: "I'll tell you a secret password: It's 63445.",
- },
- {
- role: "assistant",
- content: "I will do my best to remember that.",
- },
- {
- role: "user",
- content:
- "And here another fun fact: Bananas may be bluer than bread at night.",
- },
- {
- role: "assistant",
- content: "Yes, that makes sense.",
- },
- ],
-});
-
-const turn1 = await createCompletion(
- chat,
- "Please tell me the secret password."
-);
-console.debug(turn1.choices[0].message);
-// "The secret password you shared earlier is 63445.""
-
-const turn2 = await createCompletion(
- chat,
- "Thanks! Have your heard about the bananas?"
-);
-console.debug(turn2.choices[0].message);
-
-for (let i = 0; i < 32; i++) {
- // gpu go brr
- const turn = await createCompletion(
- chat,
- i % 2 === 0 ? "Tell me a fun fact." : "And a boring one?"
- );
- console.debug({
- message: turn.choices[0].message,
- n_past_tokens: turn.usage.n_past_tokens,
- });
-}
-
-const finalTurn = await createCompletion(
- chat,
- "Now I forgot the secret password. Can you remind me?"
-);
-console.debug(finalTurn.choices[0].message);
-
-// result of finalTurn may vary depending on whether the generated facts pushed the secret out of the context window.
-// "Of course! The secret password you shared earlier is 63445."
-// "I apologize for any confusion. As an AI language model, ..."
-
-model.dispose();
diff --git a/gpt4all-bindings/typescript/spec/chat-minimal.mjs b/gpt4all-bindings/typescript/spec/chat-minimal.mjs
deleted file mode 100644
index 6d822f23..00000000
--- a/gpt4all-bindings/typescript/spec/chat-minimal.mjs
+++ /dev/null
@@ -1,19 +0,0 @@
-import { loadModel, createCompletion } from "../src/gpt4all.js";
-
-const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", {
- verbose: true,
- device: "gpu",
-});
-
-const chat = await model.createChatSession();
-
-await createCompletion(
- chat,
- "Why are bananas rather blue than bread at night sometimes?",
- {
- verbose: true,
- }
-);
-await createCompletion(chat, "Are you sure?", {
- verbose: true,
-});
diff --git a/gpt4all-bindings/typescript/spec/concurrency.mjs b/gpt4all-bindings/typescript/spec/concurrency.mjs
deleted file mode 100644
index 55ba9046..00000000
--- a/gpt4all-bindings/typescript/spec/concurrency.mjs
+++ /dev/null
@@ -1,29 +0,0 @@
-import {
- loadModel,
- createCompletion,
-} from "../src/gpt4all.js";
-
-const modelOptions = {
- verbose: true,
-};
-
-const model1 = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", {
- ...modelOptions,
- device: "gpu", // only one model can be on gpu
-});
-const model2 = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", modelOptions);
-const model3 = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", modelOptions);
-
-const promptContext = {
- verbose: true,
-}
-
-const responses = await Promise.all([
- createCompletion(model1, "What is 1 + 1?", promptContext),
- // generating with the same model instance will wait for the previous completion to finish
- createCompletion(model1, "What is 1 + 1?", promptContext),
- // generating with different model instances will run in parallel
- createCompletion(model2, "What is 1 + 2?", promptContext),
- createCompletion(model3, "What is 1 + 3?", promptContext),
-]);
-console.log(responses.map((res) => res.choices[0].message));
diff --git a/gpt4all-bindings/typescript/spec/embed-jsonl.mjs b/gpt4all-bindings/typescript/spec/embed-jsonl.mjs
deleted file mode 100644
index 2eb4bcab..00000000
--- a/gpt4all-bindings/typescript/spec/embed-jsonl.mjs
+++ /dev/null
@@ -1,26 +0,0 @@
-import { loadModel, createEmbedding } from '../src/gpt4all.js'
-import { createGunzip, createGzip, createUnzip } from 'node:zlib';
-import { Readable } from 'stream'
-import readline from 'readline'
-const embedder = await loadModel("nomic-embed-text-v1.5.f16.gguf", { verbose: true, type: 'embedding', device: 'gpu' })
-console.log("Running with", embedder.llm.threadCount(), "threads");
-
-
-const unzip = createGunzip();
-const url = "https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/squad_pairs.jsonl.gz"
-const stream = await fetch(url)
- .then(res => Readable.fromWeb(res.body));
-
-const lineReader = readline.createInterface({
- input: stream.pipe(unzip),
- crlfDelay: Infinity
-})
-
-lineReader.on('line', line => {
- //pairs of questions and answers
- const question_answer = JSON.parse(line)
- console.log(createEmbedding(embedder, question_answer))
-})
-
-lineReader.on('close', () => embedder.dispose())
-
diff --git a/gpt4all-bindings/typescript/spec/embed.mjs b/gpt4all-bindings/typescript/spec/embed.mjs
deleted file mode 100644
index d3dc4e1b..00000000
--- a/gpt4all-bindings/typescript/spec/embed.mjs
+++ /dev/null
@@ -1,12 +0,0 @@
-import { loadModel, createEmbedding } from '../src/gpt4all.js'
-
-const embedder = await loadModel("nomic-embed-text-v1.5.f16.gguf", { verbose: true, type: 'embedding' , device: 'gpu' })
-
-try {
-console.log(createEmbedding(embedder, ["Accept your current situation", "12312"], { prefix: "search_document" }))
-
-} catch(e) {
-console.log(e)
-}
-
-embedder.dispose()
diff --git a/gpt4all-bindings/typescript/spec/llmodel.mjs b/gpt4all-bindings/typescript/spec/llmodel.mjs
deleted file mode 100644
index baa6ed76..00000000
--- a/gpt4all-bindings/typescript/spec/llmodel.mjs
+++ /dev/null
@@ -1,61 +0,0 @@
-import {
- LLModel,
- createCompletion,
- DEFAULT_DIRECTORY,
- DEFAULT_LIBRARIES_DIRECTORY,
- loadModel,
-} from "../src/gpt4all.js";
-
-const model = await loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf", {
- verbose: true,
- device: "gpu",
-});
-const ll = model.llm;
-
-try {
- class Extended extends LLModel {}
-} catch (e) {
- console.log("Extending from native class gone wrong " + e);
-}
-
-console.log("state size " + ll.stateSize());
-
-console.log("thread count " + ll.threadCount());
-ll.setThreadCount(5);
-
-console.log("thread count " + ll.threadCount());
-ll.setThreadCount(4);
-console.log("thread count " + ll.threadCount());
-console.log("name " + ll.name());
-console.log("type: " + ll.type());
-console.log("Default directory for models", DEFAULT_DIRECTORY);
-console.log("Default directory for libraries", DEFAULT_LIBRARIES_DIRECTORY);
-console.log("Has GPU", ll.hasGpuDevice());
-console.log("gpu devices", ll.listGpu());
-console.log("Required Mem in bytes", ll.memoryNeeded());
-
-// to ingest a custom system prompt without using a chat session.
-await createCompletion(
- model,
- "<|im_start|>system\nYou are an advanced mathematician.\n<|im_end|>\n",
- {
- promptTemplate: "%1",
- nPredict: 0,
- special: true,
- }
-);
-const completion1 = await createCompletion(model, "What is 1 + 1?", {
- verbose: true,
-});
-console.log(`🤖 > ${completion1.choices[0].message.content}`);
-// Very specific observation:
-// tested on Ubuntu 22.04 and Linux Mint; if nPast is set to 100, the app hangs.
-const completion2 = await createCompletion(model, "And if we add two?", {
- verbose: true,
-});
-console.log(`🤖 > ${completion2.choices[0].message.content}`);
-
-// CALLING DISPOSE WILL INVALIDATE THE NATIVE MODEL. USE THIS TO CLEAN UP.
-model.dispose();
-
-console.log("model disposed, exiting...");
diff --git a/gpt4all-bindings/typescript/spec/long-context.mjs b/gpt4all-bindings/typescript/spec/long-context.mjs
deleted file mode 100644
index abe3f36d..00000000
--- a/gpt4all-bindings/typescript/spec/long-context.mjs
+++ /dev/null
@@ -1,21 +0,0 @@
-import { promises as fs } from "node:fs";
-import { loadModel, createCompletion } from "../src/gpt4all.js";
-
-const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", {
- verbose: true,
- device: "gpu",
- nCtx: 32768,
-});
-
-const typeDefSource = await fs.readFile("./src/gpt4all.d.ts", "utf-8");
-
-const res = await createCompletion(
- model,
- "Here are the type definitions for the GPT4All API:\n\n" +
- typeDefSource +
- "\n\nHow do I create a completion with a really large context window?",
- {
- verbose: true,
- }
-);
-console.debug(res.choices[0].message);
diff --git a/gpt4all-bindings/typescript/spec/model-switching.mjs b/gpt4all-bindings/typescript/spec/model-switching.mjs
deleted file mode 100644
index 264c7156..00000000
--- a/gpt4all-bindings/typescript/spec/model-switching.mjs
+++ /dev/null
@@ -1,60 +0,0 @@
-import { loadModel, createCompletion } from "../src/gpt4all.js";
-
-const model1 = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", {
- device: "gpu",
- nCtx: 4096,
-});
-
-const chat1 = await model1.createChatSession({
- temperature: 0.8,
- topP: 0.7,
- topK: 60,
-});
-
-const chat1turn1 = await createCompletion(
- chat1,
- "Outline a short story concept for adults. About why bananas are rather blue than bread is green at night sometimes. Not too long."
-);
-console.debug(chat1turn1.choices[0].message);
-
-const chat1turn2 = await createCompletion(
- chat1,
- "Lets sprinkle some plot twists. And a cliffhanger at the end."
-);
-console.debug(chat1turn2.choices[0].message);
-
-const chat1turn3 = await createCompletion(
- chat1,
- "Analyze your plot. Find the weak points."
-);
-console.debug(chat1turn3.choices[0].message);
-
-const chat1turn4 = await createCompletion(
- chat1,
- "Rewrite it based on the analysis."
-);
-console.debug(chat1turn4.choices[0].message);
-
-model1.dispose();
-
-const model2 = await loadModel("gpt4all-falcon-newbpe-q4_0.gguf", {
- device: "gpu",
-});
-
-const chat2 = await model2.createChatSession({
- messages: chat1.messages,
-});
-
-const chat2turn1 = await createCompletion(
- chat2,
- "Give three ideas how this plot could be improved."
-);
-console.debug(chat2turn1.choices[0].message);
-
-const chat2turn2 = await createCompletion(
- chat2,
- "Revise the plot, applying your ideas."
-);
-console.debug(chat2turn2.choices[0].message);
-
-model2.dispose();
diff --git a/gpt4all-bindings/typescript/spec/stateless.mjs b/gpt4all-bindings/typescript/spec/stateless.mjs
deleted file mode 100644
index 6e3f82b2..00000000
--- a/gpt4all-bindings/typescript/spec/stateless.mjs
+++ /dev/null
@@ -1,50 +0,0 @@
-import { loadModel, createCompletion } from "../src/gpt4all.js";
-
-const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", {
- verbose: true,
- device: "gpu",
-});
-
-const messages = [
- {
- role: "system",
- content: "<|im_start|>system\nYou are an advanced mathematician.\n<|im_end|>\n",
- },
- {
- role: "user",
- content: "What's 2+2?",
- },
- {
- role: "assistant",
- content: "5",
- },
- {
- role: "user",
- content: "Are you sure?",
- },
-];
-
-
-const res1 = await createCompletion(model, messages);
-console.debug(res1.choices[0].message);
-messages.push(res1.choices[0].message);
-
-messages.push({
- role: "user",
- content: "Could you double check that?",
-});
-
-const res2 = await createCompletion(model, messages);
-console.debug(res2.choices[0].message);
-messages.push(res2.choices[0].message);
-
-messages.push({
- role: "user",
- content: "Let's bring out the big calculators.",
-});
-
-const res3 = await createCompletion(model, messages);
-console.debug(res3.choices[0].message);
-messages.push(res3.choices[0].message);
-
-// console.debug(messages);
diff --git a/gpt4all-bindings/typescript/spec/streaming.mjs b/gpt4all-bindings/typescript/spec/streaming.mjs
deleted file mode 100644
index 0dfcfd7b..00000000
--- a/gpt4all-bindings/typescript/spec/streaming.mjs
+++ /dev/null
@@ -1,57 +0,0 @@
-import {
- loadModel,
- createCompletion,
- createCompletionStream,
- createCompletionGenerator,
-} from "../src/gpt4all.js";
-
-const model = await loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf", {
- device: "gpu",
-});
-
-process.stdout.write("### Stream:");
-const stream = createCompletionStream(model, "How are you?");
-stream.tokens.on("data", (data) => {
- process.stdout.write(data);
-});
-await stream.result;
-process.stdout.write("\n");
-
-process.stdout.write("### Stream with pipe:");
-const stream2 = createCompletionStream(
- model,
- "Please say something nice about node streams."
-);
-stream2.tokens.pipe(process.stdout);
-const stream2Res = await stream2.result;
-process.stdout.write("\n");
-
-process.stdout.write("### Generator:");
-const gen = createCompletionGenerator(model, "generators instead?", {
- nPast: stream2Res.usage.n_past_tokens,
-});
-for await (const chunk of gen) {
- process.stdout.write(chunk);
-}
-
-process.stdout.write("\n");
-
-process.stdout.write("### Callback:");
-await createCompletion(model, "Why not just callbacks?", {
- onResponseToken: (tokenId, token) => {
- process.stdout.write(token);
- },
-});
-process.stdout.write("\n");
-
-process.stdout.write("### 2nd Generator:");
-const gen2 = createCompletionGenerator(model, "If 3 + 3 is 5, what is 2 + 2?");
-
-let chunk = await gen2.next();
-while (!chunk.done) {
- process.stdout.write(chunk.value);
- chunk = await gen2.next();
-}
-process.stdout.write("\n");
-console.debug("generator finished", chunk);
-model.dispose();
diff --git a/gpt4all-bindings/typescript/spec/system.mjs b/gpt4all-bindings/typescript/spec/system.mjs
deleted file mode 100644
index f80e3f3a..00000000
--- a/gpt4all-bindings/typescript/spec/system.mjs
+++ /dev/null
@@ -1,19 +0,0 @@
-import {
- loadModel,
- createCompletion,
-} from "../src/gpt4all.js";
-
-const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", {
- verbose: true,
- device: "gpu",
-});
-
-const chat = await model.createChatSession({
- verbose: true,
- systemPrompt: "<|im_start|>system\nRoleplay as Batman. Answer as if you are Batman, never say you're an Assistant.\n<|im_end|>",
-});
-const turn1 = await createCompletion(chat, "You have any plans tonight?");
-console.log(turn1.choices[0].message);
-// "I'm afraid I must decline any personal invitations tonight. As Batman, I have a responsibility to protect Gotham City."
-
-model.dispose();
diff --git a/gpt4all-bindings/typescript/src/chat-session.js b/gpt4all-bindings/typescript/src/chat-session.js
deleted file mode 100644
index dcbdb7da..00000000
--- a/gpt4all-bindings/typescript/src/chat-session.js
+++ /dev/null
@@ -1,169 +0,0 @@
-const { DEFAULT_PROMPT_CONTEXT } = require("./config");
-const { prepareMessagesForIngest } = require("./util");
-
-class ChatSession {
- model;
- modelName;
- /**
- * @type {import('./gpt4all').ChatMessage[]}
- */
- messages;
- /**
- * @type {string}
- */
- systemPrompt;
- /**
- * @type {import('./gpt4all').LLModelPromptContext}
- */
- promptContext;
- /**
- * @type {boolean}
- */
- initialized;
-
- constructor(model, chatSessionOpts = {}) {
- const { messages, systemPrompt, ...sessionDefaultPromptContext } =
- chatSessionOpts;
- this.model = model;
- this.modelName = model.llm.name();
- this.messages = messages ?? [];
- this.systemPrompt = systemPrompt ?? model.config.systemPrompt;
- this.initialized = false;
- this.promptContext = {
- ...DEFAULT_PROMPT_CONTEXT,
- ...sessionDefaultPromptContext,
- nPast: 0,
- };
- }
-
- async initialize(completionOpts = {}) {
- if (this.model.activeChatSession !== this) {
- this.model.activeChatSession = this;
- }
-
- let tokensIngested = 0;
-
- // ingest system prompt
-
- if (this.systemPrompt) {
- const systemRes = await this.model.generate(this.systemPrompt, {
- promptTemplate: "%1",
- nPredict: 0,
- special: true,
- nBatch: this.promptContext.nBatch,
- // verbose: true,
- });
- tokensIngested += systemRes.tokensIngested;
- this.promptContext.nPast = systemRes.nPast;
- }
-
- // ingest initial messages
- if (this.messages.length > 0) {
- tokensIngested += await this.ingestMessages(
- this.messages,
- completionOpts
- );
- }
-
- this.initialized = true;
-
- return tokensIngested;
- }
-
- async ingestMessages(messages, completionOpts = {}) {
- const turns = prepareMessagesForIngest(messages);
-
- // send the message pairs to the model
- let tokensIngested = 0;
-
- for (const turn of turns) {
- const turnRes = await this.model.generate(turn.user, {
- ...this.promptContext,
- ...completionOpts,
- fakeReply: turn.assistant,
- });
- tokensIngested += turnRes.tokensIngested;
- this.promptContext.nPast = turnRes.nPast;
- }
- return tokensIngested;
- }
-
- async generate(input, completionOpts = {}) {
- if (this.model.activeChatSession !== this) {
- throw new Error(
- "Chat session is not active. Create a new chat session or call initialize to continue."
- );
- }
- if (completionOpts.nPast > this.promptContext.nPast) {
- throw new Error(
- `nPast cannot be greater than ${this.promptContext.nPast}.`
- );
- }
- let tokensIngested = 0;
-
- if (!this.initialized) {
- tokensIngested += await this.initialize(completionOpts);
- }
-
- let prompt = input;
-
- if (Array.isArray(input)) {
- // assuming input is a messages array
- // -> tailing user message will be used as the final prompt. it's optional.
- // -> all system messages will be ignored.
- // -> all other messages will be ingested with fakeReply
- // -> user/assistant messages will be pushed into the messages array
-
- let tailingUserMessage = "";
- let messagesToIngest = input;
-
- const lastMessage = input[input.length - 1];
- if (lastMessage.role === "user") {
- tailingUserMessage = lastMessage.content;
- messagesToIngest = input.slice(0, input.length - 1);
- }
-
- if (messagesToIngest.length > 0) {
- tokensIngested += await this.ingestMessages(
- messagesToIngest,
- completionOpts
- );
- this.messages.push(...messagesToIngest);
- }
-
- if (tailingUserMessage) {
- prompt = tailingUserMessage;
- } else {
- return {
- text: "",
- nPast: this.promptContext.nPast,
- tokensIngested,
- tokensGenerated: 0,
- };
- }
- }
-
- const result = await this.model.generate(prompt, {
- ...this.promptContext,
- ...completionOpts,
- });
-
- this.promptContext.nPast = result.nPast;
- result.tokensIngested += tokensIngested;
-
- this.messages.push({
- role: "user",
- content: prompt,
- });
- this.messages.push({
- role: "assistant",
- content: result.text,
- });
-
- return result;
- }
-}
-
-module.exports = {
- ChatSession,
-};
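`ChatSession.generate` above accepts either a prompt string or a messages array; with an array, completed turns are ingested via `fakeReply` (no tokens are generated for them) and a tailing user message becomes the actual prompt. A sketch against the deleted API:

```js
import { loadModel, createCompletion } from "gpt4all";

const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf");
const chat = await model.createChatSession();

// The first two messages are replayed with fakeReply; only the tailing
// user message produces new tokens.
const res = await createCompletion(chat, [
    { role: "user", content: "The secret number is 42." },
    { role: "assistant", content: "Understood." },
    { role: "user", content: "What is the secret number?" },
]);
console.log(res.choices[0].message.content);
```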
diff --git a/gpt4all-bindings/typescript/src/config.js b/gpt4all-bindings/typescript/src/config.js
deleted file mode 100644
index 29ce8e49..00000000
--- a/gpt4all-bindings/typescript/src/config.js
+++ /dev/null
@@ -1,48 +0,0 @@
-const os = require("node:os");
-const path = require("node:path");
-
-const DEFAULT_DIRECTORY = path.resolve(os.homedir(), ".cache/gpt4all");
-
-const librarySearchPaths = [
- path.join(DEFAULT_DIRECTORY, "libraries"),
- path.resolve("./libraries"),
- path.resolve(
- __dirname,
- "..",
- `runtimes/${process.platform}-${process.arch}/native`,
- ),
- //for darwin. This is hardcoded for now but it should work
- path.resolve(
- __dirname,
- "..",
- `runtimes/${process.platform}/native`,
- ),
- process.cwd(),
-];
-
-const DEFAULT_LIBRARIES_DIRECTORY = librarySearchPaths.join(";");
-
-const DEFAULT_MODEL_CONFIG = {
- systemPrompt: "",
- promptTemplate: "### Human:\n%1\n\n### Assistant:\n",
-}
-
-const DEFAULT_MODEL_LIST_URL = "https://gpt4all.io/models/models3.json";
-
-const DEFAULT_PROMPT_CONTEXT = {
- temp: 0.1,
- topK: 40,
- topP: 0.9,
- minP: 0.0,
- repeatPenalty: 1.18,
- repeatLastN: 10,
- nBatch: 100,
-}
-
-module.exports = {
- DEFAULT_DIRECTORY,
- DEFAULT_LIBRARIES_DIRECTORY,
- DEFAULT_MODEL_CONFIG,
- DEFAULT_MODEL_LIST_URL,
- DEFAULT_PROMPT_CONTEXT,
-};
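`DEFAULT_LIBRARIES_DIRECTORY` is a `;`-joined search list consumed left to right when the addon resolves its backend runtimes. A sketch of prepending a custom runtime directory via `librariesPath` (the `/opt` path is illustrative):

```js
import { loadModel, DEFAULT_LIBRARIES_DIRECTORY } from "gpt4all";

// Entries are separated by ";" and searched in order.
const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", {
    librariesPath: ["/opt/gpt4all/native", DEFAULT_LIBRARIES_DIRECTORY].join(";"),
});
```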
diff --git a/gpt4all-bindings/typescript/src/gpt4all.d.ts b/gpt4all-bindings/typescript/src/gpt4all.d.ts
deleted file mode 100644
index 4d9bfdcc..00000000
--- a/gpt4all-bindings/typescript/src/gpt4all.d.ts
+++ /dev/null
@@ -1,858 +0,0 @@
-/// <reference types="node" />
-declare module "gpt4all";
-
-interface LLModelOptions {
- /**
- * Model architecture. This argument currently does not have any functionality and is just used as a descriptive identifier for the user.
- */
- type?: string;
- model_name: string;
- model_path: string;
- library_path?: string;
-}
-
-interface ModelConfig {
- systemPrompt: string;
- promptTemplate: string;
- path: string;
- url?: string;
-}
-
-/**
- * Options for the chat session.
- */
-interface ChatSessionOptions extends Partial<LLModelPromptContext> {
- /**
- * System prompt to ingest on initialization.
- */
- systemPrompt?: string;
-
- /**
- * Messages to ingest on initialization.
- */
- messages?: ChatMessage[];
-}
-
-/**
- * ChatSession utilizes an InferenceModel for efficient processing of chat conversations.
- */
-declare class ChatSession implements CompletionProvider {
- /**
- * Constructs a new ChatSession using the provided InferenceModel and options.
- * Does not set the chat session as the active chat session until initialize is called.
- * @param {InferenceModel} model An InferenceModel instance.
- * @param {ChatSessionOptions} [options] Options for the chat session including default completion options.
- */
- constructor(model: InferenceModel, options?: ChatSessionOptions);
- /**
- * The underlying InferenceModel used for generating completions.
- */
- model: InferenceModel;
- /**
- * The name of the model.
- */
- modelName: string;
- /**
- * The messages that have been exchanged in this chat session.
- */
- messages: ChatMessage[];
- /**
- * The system prompt that has been ingested at the beginning of the chat session.
- */
- systemPrompt: string;
- /**
- * The current prompt context of the chat session.
- */
- promptContext: LLModelPromptContext;
-
- /**
- * Ingests system prompt and initial messages.
- * Sets this chat session as the active chat session of the model.
- * @param {CompletionOptions} [options] Set completion options for initialization.
- * @returns {Promise<number>} The number of tokens ingested during initialization. systemPrompt + messages.
- */
- initialize(completionOpts?: CompletionOptions): Promise<number>;
-
- /**
- * Prompts the model in chat-session context.
- * @param {CompletionInput} input Input string or message array.
- * @param {CompletionOptions} [options] Set completion options for this generation.
- * @returns {Promise<InferenceResult>} The inference result.
- * @throws {Error} If the chat session is not the active chat session of the model.
- * @throws {Error} If nPast is set to a value higher than what has been ingested in the session.
- */
- generate(
- input: CompletionInput,
- options?: CompletionOptions
- ): Promise<InferenceResult>;
-}
-
-/**
- * Shape of InferenceModel generations.
- */
-interface InferenceResult extends LLModelInferenceResult {
- tokensIngested: number;
- tokensGenerated: number;
-}
-
-/**
- * InferenceModel represents an LLM which can make next-token predictions.
- */
-declare class InferenceModel implements CompletionProvider {
- constructor(llm: LLModel, config: ModelConfig);
- /** The native LLModel */
- llm: LLModel;
- /** The configuration the instance was constructed with. */
- config: ModelConfig;
- /** The active chat session of the model. */
- activeChatSession?: ChatSession;
- /** The name of the model. */
- modelName: string;
-
- /**
- * Create a chat session with the model and set it as the active chat session of this model.
- * A model instance can only have one active chat session at a time.
- * @param {ChatSessionOptions} options The options for the chat session.
- * @returns {Promise<ChatSession>} The chat session.
- */
- createChatSession(options?: ChatSessionOptions): Promise<ChatSession>;
-
- /**
- * Prompts the model with a given input and optional parameters.
- * @param {CompletionInput} input The prompt input.
- * @param {CompletionOptions} options Prompt context and other options.
- * @returns {Promise<InferenceResult>} The model's response to the prompt.
- * @throws {Error} If nPast is set to a value smaller than 0.
- * @throws {Error} If a messages array without a tailing user message is provided.
- */
- generate(
- prompt: string,
- options?: CompletionOptions
- ): Promise<InferenceResult>;
-
- /**
- * delete and cleanup the native model
- */
- dispose(): void;
-}
-
-/**
- * Options for generating one or more embeddings.
- */
-interface EmbeddingOptions {
- /**
- * The model-specific prefix representing the embedding task, without the trailing colon. For Nomic Embed
- * this can be `search_query`, `search_document`, `classification`, or `clustering`.
- */
- prefix?: string;
- /**
- * The embedding dimension, for use with Matryoshka-capable models. Defaults to full-size.
- * @default determined by the model being used.
- */
- dimensionality?: number;
- /**
- * How to handle texts longer than the model can accept. One of `mean` or `truncate`.
- * @default "mean"
- */
- longTextMode?: "mean" | "truncate";
- /**
- * Try to be fully compatible with the Atlas API. Currently, this means texts longer than 8192 tokens
- * with long_text_mode="mean" will raise an error. Disabled by default.
- * @default false
- */
- atlas?: boolean;
-}
-
-/**
- * The Node.js equivalent of the Python binding's Embed4All().embed().
- *
- * @param {EmbeddingModel} model The embedding model instance.
- * @param {string} text Text to embed.
- * @param {EmbeddingOptions} options Optional parameters for the embedding.
- * @returns {EmbeddingResult} The embedding result.
- * @throws {Error} If dimensionality is set to a value smaller than 1.
- */
-declare function createEmbedding(
- model: EmbeddingModel,
- text: string,
- options?: EmbeddingOptions
-): EmbeddingResult<Float32Array>;
-
-/**
- * Overload that takes multiple strings to embed.
- * @param {EmbeddingModel} model The embedding model instance.
- * @param {string[]} texts Texts to embed.
- * @param {EmbeddingOptions} options Optional parameters for the embedding.
- * @returns {EmbeddingResult} The embedding result.
- * @throws {Error} If dimensionality is set to a value smaller than 1.
- */
-declare function createEmbedding(
- model: EmbeddingModel,
- text: string[],
- options?: EmbeddingOptions
-): EmbeddingResult<Float32Array[]>;
-
-/**
- * The resulting embedding.
- */
-interface EmbeddingResult<T> {
- /**
- * Encoded token count. Includes overlap but specifically excludes tokens used for the prefix/task_type, BOS/CLS token, and EOS/SEP token
- **/
- n_prompt_tokens: number;
-
- embeddings: T;
-}
-/**
- * EmbeddingModel represents an LLM which can create embeddings, which are float arrays
- */
-declare class EmbeddingModel {
- constructor(llm: LLModel, config: ModelConfig);
- /** The native LLModel */
- llm: LLModel;
- /** The configuration the instance was constructed with. */
- config: ModelConfig;
-
- /**
- * Create an embedding from a given input string. See EmbeddingOptions.
- * @param {string} text
- * @param {string} prefix
- * @param {number} dimensionality
- * @param {boolean} doMean
- * @param {boolean} atlas
- * @returns {EmbeddingResult} The embedding result.
- */
- embed(
- text: string,
- prefix: string,
- dimensionality: number,
- doMean: boolean,
- atlas: boolean
- ): EmbeddingResult<Float32Array>;
- /**
- * Create an embedding from a given input text array. See EmbeddingOptions.
- * @param {string[]} text
- * @param {string} prefix
- * @param {number} dimensionality
- * @param {boolean} doMean
- * @param {boolean} atlas
- * @returns {EmbeddingResult} The embedding result.
- */
- embed(
- text: string[],
- prefix: string,
- dimensionality: number,
- doMean: boolean,
- atlas: boolean
- ): EmbeddingResult<Float32Array[]>;
-
- /**
- * delete and cleanup the native model
- */
- dispose(): void;
-}
-
-/**
- * Shape of LLModel's inference result.
- */
-interface LLModelInferenceResult {
- text: string;
- nPast: number;
-}
-
-interface LLModelInferenceOptions extends Partial<LLModelPromptContext> {
- /** Callback for response tokens, called for each generated token.
- * @param {number} tokenId The token id.
- * @param {string} token The token.
- * @returns {boolean | undefined} Whether to continue generating tokens.
- * */
- onResponseToken?: (tokenId: number, token: string) => boolean | void;
- /** Callback for prompt tokens, called for each input token in the prompt.
- * @param {number} tokenId The token id.
- * @returns {boolean | undefined} Whether to continue ingesting the prompt.
- * */
- onPromptToken?: (tokenId: number) => boolean | void;
-}
-
-/**
- * LLModel class representing a language model.
- * This is a base class that provides common functionality for different types of language models.
- */
-declare class LLModel {
- /**
- * Initialize a new LLModel.
- * @param {LLModelOptions} options Options including the absolute path to the model file.
- * @throws {Error} If the model file does not exist.
- */
- constructor(options: LLModelOptions);
-
- /** undefined or user supplied */
- type(): string | undefined;
-
- /** The name of the model. */
- name(): string;
-
- /**
- * Get the size of the internal state of the model.
- * NOTE: This state data is specific to the type of model you have created.
- * @return the size in bytes of the internal state of the model
- */
- stateSize(): number;
-
- /**
- * Get the number of threads used for model inference.
- * The default is the number of physical cores your computer has.
- * @returns The number of threads used for model inference.
- */
- threadCount(): number;
-
- /**
- * Set the number of threads used for model inference.
- * @param newNumber The new number of threads.
- */
- setThreadCount(newNumber: number): void;
-
- /**
- * Prompt the model directly with a given input string and optional parameters.
- * Use the higher level createCompletion methods for a more user-friendly interface.
- * @param {string} prompt The prompt input.
- * @param {LLModelInferenceOptions} options Optional parameters for the generation.
- * @returns {LLModelInferenceResult} The response text and final context size.
- */
- infer(
- prompt: string,
- options: LLModelInferenceOptions
- ): Promise<LLModelInferenceResult>;
-
- /**
- * Embed text with the model. See EmbeddingOptions for more information.
- * Use the higher level createEmbedding methods for a more user-friendly interface.
- * @param {string} text
- * @param {string} prefix
- * @param {number} dimensionality
- * @param {boolean} doMean
- * @param {boolean} atlas
- * @returns {Float32Array} The embedding of the text.
- */
- embed(
- text: string,
- prefix: string,
- dimensionality: number,
- doMean: boolean,
- atlas: boolean
- ): Float32Array;
-
- /**
- * Embed multiple texts with the model. See EmbeddingOptions for more information.
- * Use the higher level createEmbedding methods for a more user-friendly interface.
- * @param {string[]} texts
- * @param {string} prefix
- * @param {number} dimensionality
- * @param {boolean} doMean
- * @param {boolean} atlas
- * @returns {Float32Array[]} The embeddings of the texts.
- */
- embed(
-        texts: string[],
- prefix: string,
- dimensionality: number,
- doMean: boolean,
- atlas: boolean
- ): Float32Array[];
-
- /**
- * Whether the model is loaded or not.
- */
- isModelLoaded(): boolean;
-
- /**
-     * Set where to search for the pluggable backend libraries.
- */
- setLibraryPath(s: string): void;
-
- /**
-     * Get the search path for the pluggable backend libraries.
- */
- getLibraryPath(): string;
-
-    /**
-     * Initialize a GPU device by a string identifier.
-     * @param {number} memory_required The required memory in bytes; must fit in a size_t, or an error is thrown.
-     * @param {string} device_name 'amd' | 'nvidia' | 'intel' | 'gpu' | gpu name.
-     * Read LoadModelOptions.device for more information.
-     */
-    initGpuByString(memory_required: number, device_name: string): boolean;
-
- /**
-     * From the C documentation:
- * @returns True if a GPU device is successfully initialized, false otherwise.
- */
- hasGpuDevice(): boolean;
-
- /**
-     * List the GPUs that are usable for this LLModel.
-     * @param {number} nCtx Maximum size of the context window.
-     * @throws if hasGpuDevice returns false
-     * @returns The list of available GPU devices.
- */
- listGpu(nCtx: number): GpuDevice[];
-
- /**
-     * Delete and clean up the native model.
- */
- dispose(): void;
-}
-/**
- * An object that contains GPU data for this machine.
- */
-interface GpuDevice {
- index: number;
- /**
-     * Same as VkPhysicalDeviceType.
- */
- type: number;
- heapSize: number;
- name: string;
- vendor: string;
-}
-
-/**
- * Options that configure a model's behavior.
- */
-interface LoadModelOptions {
- /**
- * Where to look for model files.
- */
- modelPath?: string;
- /**
- * Where to look for the backend libraries.
- */
- librariesPath?: string;
- /**
- * The path to the model configuration file, useful for offline usage or custom model configurations.
- */
- modelConfigFile?: string;
- /**
- * Whether to allow downloading the model if it is not present at the specified path.
- */
- allowDownload?: boolean;
- /**
- * Enable verbose logging.
- */
- verbose?: boolean;
- /**
- * The processing unit on which the model will run. It can be set to
- * - "cpu": Model will run on the central processing unit.
- * - "gpu": Model will run on the best available graphics processing unit, irrespective of its vendor.
- * - "amd", "nvidia", "intel": Model will run on the best available GPU from the specified vendor.
- * - "gpu name": Model will run on the GPU that matches the name if it's available.
- * Note: If a GPU device lacks sufficient RAM to accommodate the model, an error will be thrown, and the GPT4All
- * instance will be rendered invalid. It's advised to ensure the device has enough memory before initiating the
- * model.
- * @default "cpu"
- */
- device?: string;
- /**
-     * The maximum context window size of this model.
- * @default 2048
- */
- nCtx?: number;
- /**
-     * The number of model layers to offload to the GPU.
- * @default 100
- */
- ngl?: number;
-}
-
-interface InferenceModelOptions extends LoadModelOptions {
- type?: "inference";
-}
-
-interface EmbeddingModelOptions extends LoadModelOptions {
- type: "embedding";
-}
-
-/**
- * Loads a machine learning model with the specified name. The de facto way to create a model.
- * By default this will download the model from the official GPT4All website if it is not present at the given path.
- *
- * @param {string} modelName - The name of the model to load.
- * @param {LoadModelOptions|undefined} [options] - (Optional) Additional options for loading the model.
- * @returns {Promise<InferenceModel | EmbeddingModel>} A promise that resolves to an instance of the loaded model.
- */
-declare function loadModel(
- modelName: string,
- options?: InferenceModelOptions
-): Promise<InferenceModel>;
-
-declare function loadModel(
- modelName: string,
- options?: EmbeddingModelOptions
-): Promise<EmbeddingModel>;
-
-declare function loadModel(
- modelName: string,
- options?: EmbeddingModelOptions | InferenceModelOptions
-): Promise<InferenceModel | EmbeddingModel>;
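-
-/* Usage sketch for the overloads above (illustrative; the model file names
-   are hypothetical and assumed to be present locally or downloadable):
-
-   const chatModel = await loadModel("example-chat-model.gguf", {
-       device: "gpu", // best available GPU; see LoadModelOptions.device
-       nCtx: 2048,
-   });
-   const embedder = await loadModel("example-embedding-model.gguf", {
-       type: "embedding", // selects the EmbeddingModel overload
-   });
-*/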
-
-/**
- * Interface for createCompletion methods, implemented by InferenceModel and ChatSession.
- * Implement your own CompletionProvider or extend ChatSession to generate completions with custom logic.
- */
-interface CompletionProvider {
- modelName: string;
- generate(
- input: CompletionInput,
- options?: CompletionOptions
-    ): Promise<InferenceResult>;
-}
-
-/**
- * Options for creating a completion.
- */
-interface CompletionOptions extends LLModelInferenceOptions {
- /**
- * Indicates if verbose logging is enabled.
- * @default false
- */
- verbose?: boolean;
-}
-
-/**
- * The input for creating a completion. May be a string or an array of messages.
- */
-type CompletionInput = string | ChatMessage[];
-
-/**
- * The Node.js equivalent of the Python binding's chat_completion.
- * @param {CompletionProvider} provider - The inference model object or chat session
- * @param {CompletionInput} input - The input string or message array
- * @param {CompletionOptions} options - The options for creating the completion.
- * @returns {CompletionResult} The completion result.
- */
-declare function createCompletion(
- provider: CompletionProvider,
- input: CompletionInput,
- options?: CompletionOptions
-): Promise<CompletionResult>;
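-
-/* Sketch (illustrative; assumes `model` is a loaded InferenceModel). A message
-   array is ingested statelessly: prior turns are replayed and the trailing
-   user message becomes the prompt:
-
-   const res = await createCompletion(model, [
-       { role: "user", content: "What is 1 + 1?" },
-       { role: "assistant", content: "2" },
-       { role: "user", content: "And twice that?" },
-   ]);
-   console.log(res.choices[0].message.content);
-*/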
-
-/**
- * Streaming variant of createCompletion, returns a stream of tokens and a promise that resolves to the completion result.
- * @param {CompletionProvider} provider - The inference model object or chat session
- * @param {CompletionInput} input - The input string or message array
- * @param {CompletionOptions} options - The options for creating the completion.
- * @returns {CompletionStreamReturn} An object of token stream and the completion result promise.
- */
-declare function createCompletionStream(
- provider: CompletionProvider,
- input: CompletionInput,
- options?: CompletionOptions
-): CompletionStreamReturn;
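-
-/* Sketch (illustrative; assumes `model` is a loaded InferenceModel):
-
-   const { tokens, result } = createCompletionStream(model, "Count to ten.");
-   tokens.on("data", (token) => process.stdout.write(token));
-   const completion = await result; // the full CompletionResult
-*/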
-
-/**
- * The result of a streamed completion, containing a stream of tokens and a promise that resolves to the completion result.
- */
-interface CompletionStreamReturn {
- tokens: NodeJS.ReadableStream;
-    result: Promise<CompletionResult>;
-}
-
-/**
- * Async generator variant of createCompletion, yields tokens as they are generated and returns the completion result.
- * @param {CompletionProvider} provider - The inference model object or chat session
- * @param {CompletionInput} input - The input string or message array
- * @param {CompletionOptions} options - The options for creating the completion.
- * @returns {AsyncGenerator<string, CompletionResult>} A generator that yields tokens and returns the completion result.
- */
-declare function createCompletionGenerator(
- provider: CompletionProvider,
- input: CompletionInput,
- options: CompletionOptions
-): AsyncGenerator<string, CompletionResult>;
-
-/**
- * A message in the conversation.
- */
-interface ChatMessage {
- /** The role of the message. */
- role: "system" | "assistant" | "user";
-
- /** The message content. */
- content: string;
-}
-
-/**
- * The result of a completion.
- */
-interface CompletionResult {
- /** The model used for the completion. */
- model: string;
-
- /** Token usage report. */
- usage: {
- /** The number of tokens ingested during the completion. */
- prompt_tokens: number;
-
- /** The number of tokens generated in the completion. */
- completion_tokens: number;
-
- /** The total number of tokens used. */
- total_tokens: number;
-
- /** Number of tokens used in the conversation. */
- n_past_tokens: number;
- };
-
- /** The generated completion. */
- choices: Array<{
- message: ChatMessage;
- }>;
-}
-
-/**
- * Model inference arguments for generating completions.
- */
-interface LLModelPromptContext {
- /** The size of the raw logits vector. */
- logitsSize: number;
-
- /** The size of the raw tokens vector. */
- tokensSize: number;
-
- /** The number of tokens in the past conversation.
- * This may be used to "roll back" the conversation to a previous state.
- * Note that for most use cases the default value should be sufficient and this should not be set.
- * @default 0 For completions using InferenceModel, meaning the model will only consider the input prompt.
- * @default nPast For completions using ChatSession. This means the context window will be automatically determined
- * and possibly resized (see contextErase) to keep the conversation performant.
- * */
- nPast: number;
-
- /** The maximum number of tokens to predict.
- * @default 4096
- * */
- nPredict: number;
-
- /** Template for user / assistant message pairs.
- * %1 is required and will be replaced by the user input.
- * %2 is optional and will be replaced by the assistant response. If not present, the assistant response will be appended.
- */
- promptTemplate?: string;
-
-    /** The context window size. Do not use; it has no effect here.
-     * @deprecated Use loadModel's nCtx option instead.
-     * @default 2048
- */
- nCtx: number;
-
- /** The top-k logits to sample from.
- * Top-K sampling selects the next token only from the top K most likely tokens predicted by the model.
- * It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit
-     * the diversity of the output. A higher value for top-K (e.g., 100) will consider more tokens and lead
-     * to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate
-     * more conservative text. 30 - 60 is a good range for most tasks.
- * @default 40
- * */
- topK: number;
-
- /** The nucleus sampling probability threshold.
- * Top-P limits the selection of the next token to a subset of tokens with a cumulative probability
- * above a threshold P. This method, also known as nucleus sampling, finds a balance between diversity
- * and quality by considering both token probabilities and the number of tokens available for sampling.
-     * When using a higher value for top-P (e.g., 0.95), the generated text becomes more diverse.
-     * On the other hand, a lower value (e.g., 0.1) produces more focused and conservative text.
-     * @default 0.9
-     * */
- topP: number;
-
- /**
- * The minimum probability of a token to be considered.
- * @default 0.0
- */
- minP: number;
-
- /** The temperature to adjust the model's output distribution.
-     * Temperature is like a knob that adjusts how creative or focused the output becomes. Higher temperatures
-     * (e.g., 1.2) increase randomness, resulting in more imaginative and diverse text. Lower temperatures (e.g., 0.5)
-     * make the output more focused, predictable, and conservative. When the temperature is set to 0, the output
-     * becomes completely deterministic, always selecting the most probable next token and producing identical results
-     * each time. Experiment to find the value that fits best for your use case and model.
- * @default 0.1
- * @alias temperature
- * */
- temp: number;
- temperature: number;
-
-    /** The prompt batch size: the number of prompt tokens processed in parallel.
- * By splitting the prompt every N tokens, prompt-batch-size reduces RAM usage during processing. However,
- * this can increase the processing time as a trade-off. If the N value is set too low (e.g., 10), long prompts
- * with 500+ tokens will be most affected, requiring numerous processing runs to complete the prompt processing.
- * To ensure optimal performance, setting the prompt-batch-size to 2048 allows processing of all tokens in a single run.
- * @default 8
- * */
- nBatch: number;
-
- /** The penalty factor for repeated tokens.
- * Repeat-penalty can help penalize tokens based on how frequently they occur in the text, including the input prompt.
-     * A token that has already appeared five times is penalized more heavily than a token that has appeared only once.
- * A value of 1 means that there is no penalty and values larger than 1 discourage repeated tokens.
- * @default 1.18
- * */
- repeatPenalty: number;
-
- /** The number of last tokens to penalize.
- * The repeat-penalty-tokens N option controls the number of tokens in the history to consider for penalizing repetition.
- * A larger value will look further back in the generated text to prevent repetitions, while a smaller value will only
- * consider recent tokens.
- * @default 10
- * */
- repeatLastN: number;
-
- /** The percentage of context to erase if the context window is exceeded.
- * Set it to a lower value to keep context for longer at the cost of performance.
- * @default 0.75
- * */
- contextErase: number;
-}
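-
-/* Sketch of tuning the sampling fields above through CompletionOptions
-   (illustrative; assumes `model` is a loaded InferenceModel):
-
-   const res = await createCompletion(model, "Brainstorm product names.", {
-       temperature: 0.9,   // more creative output
-       topK: 60,
-       topP: 0.95,
-       repeatPenalty: 1.2, // discourage repetition
-       nPredict: 256,      // cap the response length
-   });
-*/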
-
-/**
- * From the Python API:
- * models will be stored in (homedir)/.cache/gpt4all/
- */
-declare const DEFAULT_DIRECTORY: string;
-/**
- * From the Python API:
- * The default path where dynamic libraries are stored.
- * You may separate paths by a semicolon to search in multiple areas.
- * This searches DEFAULT_DIRECTORY/libraries, cwd/libraries, and finally cwd.
- */
-declare const DEFAULT_LIBRARIES_DIRECTORY: string;
-
-/**
- * Default model configuration.
- */
-declare const DEFAULT_MODEL_CONFIG: ModelConfig;
-
-/**
- * Default prompt context.
- */
-declare const DEFAULT_PROMPT_CONTEXT: LLModelPromptContext;
-
-/**
- * Default model list url.
- */
-declare const DEFAULT_MODEL_LIST_URL: string;
-
-/**
- * Initiates the download of a model file.
- * By default this starts the download without waiting for it to finish. Use the returned controller to alter this behavior.
- * @param {string} modelName - The model to be downloaded.
- * @param {DownloadModelOptions} options - Options to pass into the downloader. Default is { modelPath: DEFAULT_DIRECTORY, verbose: false }.
- * @returns {DownloadController} object that allows controlling the download process.
- *
- * @throws {Error} If the model already exists in the specified location.
- * @throws {Error} If the model cannot be found at the specified url.
- *
- * @example
- * const download = downloadModel('ggml-gpt4all-j-v1.3-groovy.bin')
- * download.promise.then(() => console.log('Downloaded!'))
- */
-declare function downloadModel(
- modelName: string,
- options?: DownloadModelOptions
-): DownloadController;
-
-/**
- * Options for the model download process.
- */
-interface DownloadModelOptions {
- /**
-     * Location to download the model to.
-     * Defaults to DEFAULT_DIRECTORY ((homedir)/.cache/gpt4all)
- */
- modelPath?: string;
-
- /**
-     * Debug mode; logs download progress and duration.
- * @default false
- */
- verbose?: boolean;
-
- /**
- * Remote download url. Defaults to `https://gpt4all.io/models/gguf/`
- * @default https://gpt4all.io/models/gguf/
- */
- url?: string;
- /**
- * MD5 sum of the model file. If this is provided, the downloaded file will be checked against this sum.
- * If the sums do not match, an error will be thrown and the file will be deleted.
- */
- md5sum?: string;
-}
-
-interface ListModelsOptions {
- url?: string;
- file?: string;
-}
-
-declare function listModels(
- options?: ListModelsOptions
-): Promise;
-
-interface RetrieveModelOptions {
- allowDownload?: boolean;
- verbose?: boolean;
- modelPath?: string;
- modelConfigFile?: string;
-}
-
-declare function retrieveModel(
- modelName: string,
- options?: RetrieveModelOptions
-): Promise;
-
-/**
- * Model download controller.
- */
-interface DownloadController {
-    /** Cancels the download request when called. */
-    cancel: () => void;
-    /** A promise that resolves to the path of the downloaded model file once the download completes. */
-    promise: Promise<string>;
-}
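-
-/* Sketch of controlling a download (illustrative; the model name and checksum
-   are hypothetical):
-
-   const download = downloadModel("example-model.gguf", {
-       md5sum: "0123456789abcdef0123456789abcdef", // verified after download
-       verbose: true,
-   });
-   const timeout = setTimeout(() => download.cancel(), 60000); // abort slow downloads
-   const modelPath = await download.promise;
-   clearTimeout(timeout);
-*/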
-
-export {
- LLModel,
- LLModelPromptContext,
- ModelConfig,
- InferenceModel,
- InferenceResult,
- EmbeddingModel,
- EmbeddingResult,
- ChatSession,
- ChatMessage,
- CompletionInput,
- CompletionProvider,
- CompletionOptions,
- CompletionResult,
- LoadModelOptions,
- DownloadController,
- RetrieveModelOptions,
- DownloadModelOptions,
- GpuDevice,
- loadModel,
- downloadModel,
- retrieveModel,
- listModels,
- createCompletion,
- createCompletionStream,
- createCompletionGenerator,
- createEmbedding,
- DEFAULT_DIRECTORY,
- DEFAULT_LIBRARIES_DIRECTORY,
- DEFAULT_MODEL_CONFIG,
- DEFAULT_PROMPT_CONTEXT,
- DEFAULT_MODEL_LIST_URL,
-};
diff --git a/gpt4all-bindings/typescript/src/gpt4all.js b/gpt4all-bindings/typescript/src/gpt4all.js
deleted file mode 100644
index aab01d91..00000000
--- a/gpt4all-bindings/typescript/src/gpt4all.js
+++ /dev/null
@@ -1,220 +0,0 @@
-"use strict";
-
-/// This file implements the API declared in gpt4all.d.ts.
-/// Written in CommonJS to support both ESM and CJS projects.
-const { existsSync } = require("node:fs");
-const path = require("node:path");
-const Stream = require("node:stream");
-const assert = require("node:assert");
-const { LLModel } = require("node-gyp-build")(path.resolve(__dirname, ".."));
-const {
- retrieveModel,
- downloadModel,
- appendBinSuffixIfMissing,
-} = require("./util.js");
-const {
- DEFAULT_DIRECTORY,
- DEFAULT_LIBRARIES_DIRECTORY,
- DEFAULT_PROMPT_CONTEXT,
- DEFAULT_MODEL_CONFIG,
- DEFAULT_MODEL_LIST_URL,
-} = require("./config.js");
-const { InferenceModel, EmbeddingModel } = require("./models.js");
-const { ChatSession } = require("./chat-session.js");
-
-/**
- * Loads a machine learning model with the specified name. The de facto way to create a model.
- * By default this will download the model from the official GPT4All website if it is not present at the given path.
- *
- * @param {string} modelName - The name of the model to load.
- * @param {import('./gpt4all').LoadModelOptions|undefined} [options] - (Optional) Additional options for loading the model.
- * @returns {Promise<InferenceModel | EmbeddingModel>} A promise that resolves to an instance of the loaded model.
- */
-async function loadModel(modelName, options = {}) {
- const loadOptions = {
- modelPath: DEFAULT_DIRECTORY,
- librariesPath: DEFAULT_LIBRARIES_DIRECTORY,
- type: "inference",
- allowDownload: true,
- verbose: false,
- device: "cpu",
- nCtx: 2048,
- ngl: 100,
- ...options,
- };
-
- const modelConfig = await retrieveModel(modelName, {
- modelPath: loadOptions.modelPath,
- modelConfigFile: loadOptions.modelConfigFile,
- allowDownload: loadOptions.allowDownload,
- verbose: loadOptions.verbose,
- });
-
- assert.ok(
- typeof loadOptions.librariesPath === "string",
- "Libraries path should be a string"
- );
- const existingPaths = loadOptions.librariesPath
- .split(";")
- .filter(existsSync)
- .join(";");
-
- const llmOptions = {
- model_name: appendBinSuffixIfMissing(modelName),
- model_path: loadOptions.modelPath,
- library_path: existingPaths,
- device: loadOptions.device,
- nCtx: loadOptions.nCtx,
- ngl: loadOptions.ngl,
- };
-
- if (loadOptions.verbose) {
- console.debug("Creating LLModel:", {
- llmOptions,
- modelConfig,
- });
- }
- const llmodel = new LLModel(llmOptions);
- if (loadOptions.type === "embedding") {
- return new EmbeddingModel(llmodel, modelConfig);
- } else if (loadOptions.type === "inference") {
- return new InferenceModel(llmodel, modelConfig);
- } else {
- throw Error("Invalid model type: " + loadOptions.type);
- }
-}
-
-function createEmbedding(model, text, options = {}) {
- let {
- dimensionality = undefined,
- longTextMode = "mean",
- atlas = false,
- } = options;
-
- if (dimensionality === undefined) {
- dimensionality = -1;
- } else {
- if (dimensionality <= 0) {
- throw new Error(
- `Dimensionality must be undefined or a positive integer, got ${dimensionality}`
- );
- }
- if (dimensionality < model.MIN_DIMENSIONALITY) {
- console.warn(
- `Dimensionality ${dimensionality} is less than the suggested minimum of ${model.MIN_DIMENSIONALITY}. Performance may be degraded.`
- );
- }
- }
-
- let doMean;
- switch (longTextMode) {
- case "mean":
- doMean = true;
- break;
- case "truncate":
- doMean = false;
- break;
- default:
- throw new Error(
- `Long text mode must be one of 'mean' or 'truncate', got ${longTextMode}`
- );
- }
-
- return model.embed(text, options?.prefix, dimensionality, doMean, atlas);
-}
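-
-// Usage sketch (illustrative; assumes `embedder` was created with
-// loadModel(..., { type: "embedding" })):
-//
-//   const embedding = createEmbedding(embedder, "Hello world", {
-//       dimensionality: 128,      // optional truncation, model permitting
-//       longTextMode: "truncate", // or "mean" (the default)
-//   });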
-
-const defaultCompletionOptions = {
- verbose: false,
- ...DEFAULT_PROMPT_CONTEXT,
-};
-
-async function createCompletion(
- provider,
- input,
- options = defaultCompletionOptions
-) {
- const completionOptions = {
- ...defaultCompletionOptions,
- ...options,
- };
-
- const result = await provider.generate(
- input,
- completionOptions,
- );
-
- return {
- model: provider.modelName,
- usage: {
- prompt_tokens: result.tokensIngested,
- total_tokens: result.tokensIngested + result.tokensGenerated,
- completion_tokens: result.tokensGenerated,
- n_past_tokens: result.nPast,
- },
- choices: [
- {
- message: {
- role: "assistant",
- content: result.text,
- },
- // TODO some completion APIs also provide logprobs and finish_reason, could look into adding those
- },
- ],
- };
-}
-
-function createCompletionStream(
- provider,
- input,
- options = defaultCompletionOptions
-) {
- const completionStream = new Stream.PassThrough({
- encoding: "utf-8",
- });
-
- const completionPromise = createCompletion(provider, input, {
- ...options,
- onResponseToken: (tokenId, token) => {
- completionStream.push(token);
- if (options.onResponseToken) {
- return options.onResponseToken(tokenId, token);
- }
- },
- }).then((result) => {
- completionStream.push(null);
- completionStream.emit("end");
- return result;
- });
-
- return {
- tokens: completionStream,
- result: completionPromise,
- };
-}
-
-async function* createCompletionGenerator(provider, input, options) {
- const completion = createCompletionStream(provider, input, options);
- for await (const chunk of completion.tokens) {
- yield chunk;
- }
- return await completion.result;
-}
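-
-// Sketch: the generator's return value is the CompletionResult, so manual
-// iteration can capture both the tokens and the final result (illustrative;
-// assumes `model` is a loaded InferenceModel):
-//
-//   const gen = createCompletionGenerator(model, "Hello!", {});
-//   let next = await gen.next();
-//   while (!next.done) {
-//       process.stdout.write(next.value);
-//       next = await gen.next();
-//   }
-//   const completionResult = next.value;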
-
-module.exports = {
- DEFAULT_LIBRARIES_DIRECTORY,
- DEFAULT_DIRECTORY,
- DEFAULT_PROMPT_CONTEXT,
- DEFAULT_MODEL_CONFIG,
- DEFAULT_MODEL_LIST_URL,
- LLModel,
- InferenceModel,
- EmbeddingModel,
- ChatSession,
- createCompletion,
- createCompletionStream,
- createCompletionGenerator,
- createEmbedding,
- downloadModel,
- retrieveModel,
- loadModel,
-};
diff --git a/gpt4all-bindings/typescript/src/models.js b/gpt4all-bindings/typescript/src/models.js
deleted file mode 100644
index 2c516ccb..00000000
--- a/gpt4all-bindings/typescript/src/models.js
+++ /dev/null
@@ -1,165 +0,0 @@
-const { DEFAULT_PROMPT_CONTEXT } = require("./config");
-const { ChatSession } = require("./chat-session");
-const { prepareMessagesForIngest } = require("./util");
-
-class InferenceModel {
- llm;
- modelName;
- config;
- activeChatSession;
-
- constructor(llmodel, config) {
- this.llm = llmodel;
- this.config = config;
- this.modelName = this.llm.name();
- }
-
- async createChatSession(options) {
- const chatSession = new ChatSession(this, options);
- await chatSession.initialize();
- this.activeChatSession = chatSession;
- return this.activeChatSession;
- }
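-
-    // Usage sketch (illustrative): a ChatSession implements CompletionProvider,
-    // so it can be passed to the package's createCompletion directly:
-    //
-    //   const session = await model.createChatSession();
-    //   const res = await createCompletion(session, "Hi there!");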
-
- async generate(input, options = DEFAULT_PROMPT_CONTEXT) {
- const { verbose, ...otherOptions } = options;
- const promptContext = {
- promptTemplate: this.config.promptTemplate,
- temp:
- otherOptions.temp ??
- otherOptions.temperature ??
- DEFAULT_PROMPT_CONTEXT.temp,
- ...otherOptions,
- };
-
- if (promptContext.nPast < 0) {
- throw new Error("nPast must be a non-negative integer.");
- }
-
- if (verbose) {
- console.debug("Generating completion", {
- input,
- promptContext,
- });
- }
-
- let prompt = input;
- let nPast = promptContext.nPast;
- let tokensIngested = 0;
-
- if (Array.isArray(input)) {
- // assuming input is a messages array
-            // -> trailing user message will be used as the final prompt; it's required
-            // -> leading system message will be ingested as systemPrompt; further system messages will be ignored
-            // -> all other messages will be ingested with fakeReply
-            // -> model/context will only be kept for this completion; "stateless"
- nPast = 0;
- const messages = [...input];
- const lastMessage = input[input.length - 1];
- if (lastMessage.role !== "user") {
- // this is most likely a user error
- throw new Error("The final message must be of role 'user'.");
- }
- if (input[0].role === "system") {
-                // needs to be a pre-templated prompt, e.g. '<|im_start|>system\nYou are an advanced mathematician.\n<|im_end|>\n'
- const systemPrompt = input[0].content;
- const systemRes = await this.llm.infer(systemPrompt, {
- promptTemplate: "%1",
- nPredict: 0,
- special: true,
- });
- nPast = systemRes.nPast;
- tokensIngested += systemRes.tokensIngested;
- messages.shift();
- }
-
- prompt = lastMessage.content;
-            const messagesToIngest = messages.slice(0, messages.length - 1);
- const turns = prepareMessagesForIngest(messagesToIngest);
-
- for (const turn of turns) {
- const turnRes = await this.llm.infer(turn.user, {
- ...promptContext,
- nPast,
- fakeReply: turn.assistant,
- });
- tokensIngested += turnRes.tokensIngested;
- nPast = turnRes.nPast;
- }
- }
-
- let tokensGenerated = 0;
-
- const result = await this.llm.infer(prompt, {
- ...promptContext,
- nPast,
- onPromptToken: (tokenId) => {
- let continueIngestion = true;
- tokensIngested++;
- if (options.onPromptToken) {
-                    // catch errors because if they propagate through C++ they will lose their stack traces
- try {
- // don't cancel ingestion unless user explicitly returns false
- continueIngestion =
- options.onPromptToken(tokenId) !== false;
- } catch (e) {
- console.error("Error in onPromptToken callback", e);
- continueIngestion = false;
- }
- }
- return continueIngestion;
- },
- onResponseToken: (tokenId, token) => {
- let continueGeneration = true;
- tokensGenerated++;
- if (options.onResponseToken) {
- try {
- // don't cancel the generation unless user explicitly returns false
- continueGeneration =
- options.onResponseToken(tokenId, token) !== false;
- } catch (err) {
- console.error("Error in onResponseToken callback", err);
- continueGeneration = false;
- }
- }
- return continueGeneration;
- },
- });
-
- result.tokensGenerated = tokensGenerated;
- result.tokensIngested = tokensIngested;
-
- if (verbose) {
- console.debug("Finished completion:\n", result);
- }
-
- return result;
- }
-
- dispose() {
- this.llm.dispose();
- }
-}
-
-class EmbeddingModel {
- llm;
- config;
- MIN_DIMENSIONALITY = 64;
- constructor(llmodel, config) {
- this.llm = llmodel;
- this.config = config;
- }
-
- embed(text, prefix, dimensionality, do_mean, atlas) {
- return this.llm.embed(text, prefix, dimensionality, do_mean, atlas);
- }
-
- dispose() {
- this.llm.dispose();
- }
-}
-
-module.exports = {
- InferenceModel,
- EmbeddingModel,
-};
diff --git a/gpt4all-bindings/typescript/src/util.js b/gpt4all-bindings/typescript/src/util.js
deleted file mode 100644
index b9c9979b..00000000
--- a/gpt4all-bindings/typescript/src/util.js
+++ /dev/null
@@ -1,317 +0,0 @@
-const { createWriteStream, existsSync, statSync, mkdirSync } = require("node:fs");
-const fsp = require("node:fs/promises");
-const { performance } = require("node:perf_hooks");
-const path = require("node:path");
-const md5File = require("md5-file");
-const {
- DEFAULT_DIRECTORY,
- DEFAULT_MODEL_CONFIG,
- DEFAULT_MODEL_LIST_URL,
-} = require("./config.js");
-
-async function listModels(
- options = {
- url: DEFAULT_MODEL_LIST_URL,
- }
-) {
- if (!options || (!options.url && !options.file)) {
- throw new Error(
- `No model list source specified. Please specify either a url or a file.`
- );
- }
-
- if (options.file) {
- if (!existsSync(options.file)) {
- throw new Error(`Model list file ${options.file} does not exist.`);
- }
-
- const fileContents = await fsp.readFile(options.file, "utf-8");
- const modelList = JSON.parse(fileContents);
- return modelList;
- } else if (options.url) {
- const res = await fetch(options.url);
-
- if (!res.ok) {
- throw Error(
-                `Failed to retrieve model list from ${options.url} - ${res.status} ${res.statusText}`
- );
- }
- const modelList = await res.json();
- return modelList;
- }
-}
-
-function appendBinSuffixIfMissing(name) {
- const ext = path.extname(name);
- if (![".bin", ".gguf"].includes(ext)) {
- return name + ".gguf";
- }
- return name;
-}
-
-function prepareMessagesForIngest(messages) {
- const systemMessages = messages.filter(
- (message) => message.role === "system"
- );
- if (systemMessages.length > 0) {
- console.warn(
- "System messages are currently not supported and will be ignored. Use the systemPrompt option instead."
- );
- }
-
- const userAssistantMessages = messages.filter(
- (message) => message.role !== "system"
- );
-
- // make sure the first message is a user message
-    // if it's not, the turns will be out of order
- if (userAssistantMessages[0].role !== "user") {
- userAssistantMessages.unshift({
- role: "user",
- content: "",
- });
- }
-
- // create turns of user input + assistant reply
- const turns = [];
- let userMessage = null;
- let assistantMessage = null;
-
- for (const message of userAssistantMessages) {
- // consecutive messages of the same role are concatenated into one message
- if (message.role === "user") {
- if (!userMessage) {
- userMessage = message.content;
- } else {
- userMessage += "\n" + message.content;
- }
- } else if (message.role === "assistant") {
- if (!assistantMessage) {
- assistantMessage = message.content;
- } else {
- assistantMessage += "\n" + message.content;
- }
- }
-
- if (userMessage && assistantMessage) {
- turns.push({
- user: userMessage,
- assistant: assistantMessage,
- });
- userMessage = null;
- assistantMessage = null;
- }
- }
-
- return turns;
-}
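-
-// Sketch of the transformation (illustrative):
-//
-//   prepareMessagesForIngest([
-//       { role: "user", content: "Hi" },
-//       { role: "assistant", content: "Hello!" },
-//       { role: "user", content: "How" },
-//       { role: "user", content: "are you?" }, // concatenated with "How"
-//       { role: "assistant", content: "Fine." },
-//   ])
-//   // => [
-//   //   { user: "Hi", assistant: "Hello!" },
-//   //   { user: "How\nare you?", assistant: "Fine." },
-//   // ]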
-
-// readChunks() reads from the provided reader and yields the results into an async iterable
-// https://css-tricks.com/web-streams-everywhere-and-fetch-for-node-js/
-function readChunks(reader) {
- return {
- async *[Symbol.asyncIterator]() {
- let readResult = await reader.read();
- while (!readResult.done) {
- yield readResult.value;
- readResult = await reader.read();
- }
- },
- };
-}
-
-function downloadModel(modelName, options = {}) {
- const downloadOptions = {
- modelPath: DEFAULT_DIRECTORY,
- verbose: false,
- ...options,
- };
-
- const modelFileName = appendBinSuffixIfMissing(modelName);
- const partialModelPath = path.join(
- downloadOptions.modelPath,
- modelName + ".part"
- );
- const finalModelPath = path.join(downloadOptions.modelPath, modelFileName);
- const modelUrl =
- downloadOptions.url ??
- `https://gpt4all.io/models/gguf/${modelFileName}`;
-
- mkdirSync(downloadOptions.modelPath, { recursive: true });
-
- if (existsSync(finalModelPath)) {
- throw Error(`Model already exists at ${finalModelPath}`);
- }
-
- if (downloadOptions.verbose) {
- console.debug(`Downloading ${modelName} from ${modelUrl}`);
- }
-
- const headers = {
- "Accept-Ranges": "arraybuffer",
- "Response-Type": "arraybuffer",
- };
-
- const writeStreamOpts = {};
-
- if (existsSync(partialModelPath)) {
- if (downloadOptions.verbose) {
- console.debug("Partial model exists, resuming download...");
- }
- const startRange = statSync(partialModelPath).size;
- headers["Range"] = `bytes=${startRange}-`;
- writeStreamOpts.flags = "a";
- }
-
- const abortController = new AbortController();
- const signal = abortController.signal;
-
- const finalizeDownload = async () => {
- if (downloadOptions.md5sum) {
- const fileHash = await md5File(partialModelPath);
- if (fileHash !== downloadOptions.md5sum) {
- await fsp.unlink(partialModelPath);
- const message = `Model "${modelName}" failed verification: Hashes mismatch. Expected ${downloadOptions.md5sum}, got ${fileHash}`;
- throw Error(message);
- }
- if (downloadOptions.verbose) {
- console.debug(`MD5 hash verified: ${fileHash}`);
- }
- }
-
- await fsp.rename(partialModelPath, finalModelPath);
- };
-
-    // A promise that performs the download, writing to a stream; it resolves to the path the model was downloaded to when done writing.
- const downloadPromise = new Promise((resolve, reject) => {
- let timestampStart;
-
- if (downloadOptions.verbose) {
- console.debug(`Downloading @ ${partialModelPath} ...`);
- timestampStart = performance.now();
- }
-
- const writeStream = createWriteStream(
- partialModelPath,
- writeStreamOpts
- );
-
- writeStream.on("error", (e) => {
- writeStream.close();
- reject(e);
- });
-
- writeStream.on("finish", () => {
- if (downloadOptions.verbose) {
- const elapsed = performance.now() - timestampStart;
- console.log(`Finished. Download took ${elapsed.toFixed(2)} ms`);
- }
-
- finalizeDownload()
- .then(() => {
- resolve(finalModelPath);
- })
- .catch(reject);
- });
-
- fetch(modelUrl, {
- signal,
- headers,
- })
-            .then((res) => {
-                if (!res.ok) {
-                    throw Error(
-                        `Failed to download model from ${modelUrl} - ${res.status} ${res.statusText}`
-                    );
-                }
-                return res.body.getReader();
-            })
- .then(async (reader) => {
- for await (const chunk of readChunks(reader)) {
- writeStream.write(chunk);
- }
- writeStream.end();
- })
- .catch(reject);
- });
-
- return {
- cancel: () => abortController.abort(),
- promise: downloadPromise,
- };
-}
-
-async function retrieveModel(modelName, options = {}) {
- const retrieveOptions = {
- modelPath: DEFAULT_DIRECTORY,
- allowDownload: true,
- verbose: false,
- ...options,
- };
- mkdirSync(retrieveOptions.modelPath, { recursive: true });
-
- const modelFileName = appendBinSuffixIfMissing(modelName);
- const fullModelPath = path.join(retrieveOptions.modelPath, modelFileName);
- const modelExists = existsSync(fullModelPath);
-
- let config = { ...DEFAULT_MODEL_CONFIG };
-
- const availableModels = await listModels({
- file: retrieveOptions.modelConfigFile,
- url:
- retrieveOptions.allowDownload &&
- "https://gpt4all.io/models/models3.json",
- });
-
- const loadedModelConfig = availableModels.find(
- (model) => model.filename === modelFileName
- );
-
- if (loadedModelConfig) {
- config = {
- ...config,
- ...loadedModelConfig,
- };
- } else {
-        // if there's no local modelConfigFile specified and allowDownload is false, the default model config will be used;
-        // warn the user here because the model may not work as expected.
- console.warn(
- `Failed to load model config for ${modelName}. Using defaults.`
- );
- }
-
- config.systemPrompt = config.systemPrompt.trim();
-
- if (modelExists) {
- config.path = fullModelPath;
-
- if (retrieveOptions.verbose) {
- console.debug(`Found ${modelName} at ${fullModelPath}`);
- }
- } else if (retrieveOptions.allowDownload) {
- const downloadController = downloadModel(modelName, {
- modelPath: retrieveOptions.modelPath,
- verbose: retrieveOptions.verbose,
- filesize: config.filesize,
- url: config.url,
- md5sum: config.md5sum,
- });
-
- const downloadPath = await downloadController.promise;
- config.path = downloadPath;
-
- if (retrieveOptions.verbose) {
- console.debug(`Model downloaded to ${downloadPath}`);
- }
- } else {
- throw Error("Failed to retrieve model.");
- }
- return config;
-}
-
-module.exports = {
- appendBinSuffixIfMissing,
- prepareMessagesForIngest,
- downloadModel,
- retrieveModel,
- listModels,
-};
diff --git a/gpt4all-bindings/typescript/test/gpt4all.test.js b/gpt4all-bindings/typescript/test/gpt4all.test.js
deleted file mode 100644
index 6d987a3f..00000000
--- a/gpt4all-bindings/typescript/test/gpt4all.test.js
+++ /dev/null
@@ -1,205 +0,0 @@
-const path = require("node:path");
-const os = require("node:os");
-const fsp = require("node:fs/promises");
-const { existsSync } = require('node:fs');
-const { LLModel } = require("node-gyp-build")(path.resolve(__dirname, ".."));
-const {
- listModels,
- downloadModel,
- appendBinSuffixIfMissing,
-} = require("../src/util.js");
-const {
- DEFAULT_DIRECTORY,
- DEFAULT_LIBRARIES_DIRECTORY,
- DEFAULT_MODEL_LIST_URL,
-} = require("../src/config.js");
-const {
- loadModel,
- createCompletion,
-} = require("../src/gpt4all.js");
-
-describe("config", () => {
- test("default paths constants are available and correct", () => {
- expect(DEFAULT_DIRECTORY).toBe(
- path.resolve(os.homedir(), ".cache/gpt4all")
- );
- const paths = [
- path.join(DEFAULT_DIRECTORY, "libraries"),
- path.resolve("./libraries"),
- path.resolve(
- __dirname,
- "..",
- `runtimes/${process.platform}-${process.arch}/native`
- ),
- path.resolve(
- __dirname,
- "..",
- `runtimes/${process.platform}/native`,
- ),
- process.cwd(),
- ];
- expect(typeof DEFAULT_LIBRARIES_DIRECTORY).toBe("string");
- expect(DEFAULT_LIBRARIES_DIRECTORY).toBe(paths.join(";"));
- });
-});
-
-describe("listModels", () => {
- const fakeModels = require("./models.json");
- const fakeModel = fakeModels[0];
- const mockResponse = JSON.stringify([fakeModel]);
-
- let mockFetch, originalFetch;
-
- beforeAll(() => {
- // Mock the fetch function for all tests
- mockFetch = jest.fn().mockResolvedValue({
- ok: true,
- json: () => JSON.parse(mockResponse),
- });
- originalFetch = global.fetch;
- global.fetch = mockFetch;
- });
-
- afterEach(() => {
- // Reset the fetch counter after each test
- mockFetch.mockClear();
- });
- afterAll(() => {
- // Restore fetch
- global.fetch = originalFetch;
- });
-
- it("should load the model list from remote when called without args", async () => {
- const models = await listModels();
- expect(fetch).toHaveBeenCalledTimes(1);
- expect(fetch).toHaveBeenCalledWith(DEFAULT_MODEL_LIST_URL);
- expect(models[0]).toEqual(fakeModel);
- });
-
- it("should load the model list from a local file, if specified", async () => {
- const models = await listModels({
- file: path.resolve(__dirname, "models.json"),
- });
- expect(fetch).toHaveBeenCalledTimes(0);
- expect(models[0]).toEqual(fakeModel);
- });
-
- it("should throw an error if neither url nor file is specified", async () => {
- await expect(listModels(null)).rejects.toThrow(
- "No model list source specified. Please specify either a url or a file."
- );
- });
-});
-
-describe("appendBinSuffixIfMissing", () => {
- it("should make sure the suffix is there", () => {
- expect(appendBinSuffixIfMissing("filename")).toBe("filename.gguf");
- expect(appendBinSuffixIfMissing("filename.bin")).toBe("filename.bin");
- });
-});
-
-describe("downloadModel", () => {
- let mockAbortController, mockFetch;
- const fakeModelName = "fake-model";
-
- const createMockFetch = () => {
- const mockData = new Uint8Array([1, 2, 3, 4]);
- const mockResponse = new ReadableStream({
- start(controller) {
- controller.enqueue(mockData);
- controller.close();
- },
- });
- const mockFetchImplementation = jest.fn(() =>
- Promise.resolve({
- ok: true,
- body: mockResponse,
- })
- );
- return mockFetchImplementation;
- };
-
- beforeEach(async () => {
- // Mocking the AbortController constructor
- mockAbortController = jest.fn();
- global.AbortController = mockAbortController;
- mockAbortController.mockReturnValue({
- signal: "signal",
- abort: jest.fn(),
- });
- mockFetch = createMockFetch();
- jest.spyOn(global, "fetch").mockImplementation(mockFetch);
-
- });
-
- afterEach(async () => {
- // Clean up mocks
- mockAbortController.mockReset();
- mockFetch.mockClear();
- global.fetch.mockRestore();
-
-        const rootDefaultPath = path.resolve(DEFAULT_DIRECTORY),
-            partialPath = path.resolve(rootDefaultPath, fakeModelName + '.part'),
-            fullPath = path.resolve(rootDefaultPath, fakeModelName + '.gguf')
-
-        // if tests fail, remove any created files as cleanup
- if(existsSync(fullPath)) {
- await fsp.rm(fullPath)
- }
- if(existsSync(partialPath)) {
- await fsp.rm(partialPath)
- }
-
- });
-
- test("should successfully download a model file", async () => {
- const downloadController = downloadModel(fakeModelName);
- const modelFilePath = await downloadController.promise;
- expect(modelFilePath).toBe(path.resolve(DEFAULT_DIRECTORY, `${fakeModelName}.gguf`));
-
- expect(global.fetch).toHaveBeenCalledTimes(1);
- expect(global.fetch).toHaveBeenCalledWith(
- "https://gpt4all.io/models/gguf/fake-model.gguf",
- {
- signal: "signal",
- headers: {
- "Accept-Ranges": "arraybuffer",
- "Response-Type": "arraybuffer",
- },
- }
- );
-
- // final model file should be present
- await expect(fsp.access(modelFilePath)).resolves.not.toThrow();
-
- // remove the testing model file
- await fsp.unlink(modelFilePath);
- });
-
- test("should error and cleanup if md5sum is not matching", async () => {
- const downloadController = downloadModel(fakeModelName, {
- md5sum: "wrong-md5sum",
- });
- // the promise should reject with a mismatch
- await expect(downloadController.promise).rejects.toThrow(
- `Model "fake-model" failed verification: Hashes mismatch. Expected wrong-md5sum, got 08d6c05a21512a79a1dfeb9d2a8f262f`
- );
- // fetch should have been called
- expect(global.fetch).toHaveBeenCalledTimes(1);
- // the file should be missing
- await expect(
- fsp.access(path.resolve(DEFAULT_DIRECTORY, `${fakeModelName}.gguf`))
- ).rejects.toThrow();
- // partial file should also be missing
- await expect(
- fsp.access(path.resolve(DEFAULT_DIRECTORY, `${fakeModelName}.part`))
- ).rejects.toThrow();
- });
-
- // TODO
- // test("should be able to cancel and resume a download", async () => {
- // });
-});
diff --git a/gpt4all-bindings/typescript/test/models.json b/gpt4all-bindings/typescript/test/models.json
deleted file mode 100644
index 22975646..00000000
--- a/gpt4all-bindings/typescript/test/models.json
+++ /dev/null
@@ -1,10 +0,0 @@
-[
- {
- "order": "a",
- "md5sum": "08d6c05a21512a79a1dfeb9d2a8f262f",
- "name": "Not a real model",
- "filename": "fake-model.gguf",
- "filesize": "4",
- "systemPrompt": " "
- }
-]
diff --git a/gpt4all-bindings/typescript/yarn.lock b/gpt4all-bindings/typescript/yarn.lock
deleted file mode 100644
index ce9b9803..00000000
--- a/gpt4all-bindings/typescript/yarn.lock
+++ /dev/null
@@ -1,5898 +0,0 @@
-# This file is generated by running "yarn install" inside your project.
-# Manual changes might be lost - proceed with caution!
-
-__metadata:
- version: 6
- cacheKey: 8
-
-"@ampproject/remapping@npm:^2.2.0":
- version: 2.2.1
- resolution: "@ampproject/remapping@npm:2.2.1"
- dependencies:
- "@jridgewell/gen-mapping": ^0.3.0
- "@jridgewell/trace-mapping": ^0.3.9
- checksum: 03c04fd526acc64a1f4df22651186f3e5ef0a9d6d6530ce4482ec9841269cf7a11dbb8af79237c282d721c5312024ff17529cd72cc4768c11e999b58e2302079
- languageName: node
- linkType: hard
-
-"@babel/code-frame@npm:^7.0.0, @babel/code-frame@npm:^7.12.13, @babel/code-frame@npm:^7.22.13, @babel/code-frame@npm:^7.23.5":
- version: 7.23.5
- resolution: "@babel/code-frame@npm:7.23.5"
- dependencies:
- "@babel/highlight": ^7.23.4
- chalk: ^2.4.2
- checksum: d90981fdf56a2824a9b14d19a4c0e8db93633fd488c772624b4e83e0ceac6039a27cd298a247c3214faa952bf803ba23696172ae7e7235f3b97f43ba278c569a
- languageName: node
- linkType: hard
-
-"@babel/compat-data@npm:^7.23.5":
- version: 7.23.5
- resolution: "@babel/compat-data@npm:7.23.5"
- checksum: 06ce244cda5763295a0ea924728c09bae57d35713b675175227278896946f922a63edf803c322f855a3878323d48d0255a2a3023409d2a123483c8a69ebb4744
- languageName: node
- linkType: hard
-
-"@babel/core@npm:^7.11.6, @babel/core@npm:^7.12.3, @babel/core@npm:^7.18.10":
- version: 7.23.6
- resolution: "@babel/core@npm:7.23.6"
- dependencies:
- "@ampproject/remapping": ^2.2.0
- "@babel/code-frame": ^7.23.5
- "@babel/generator": ^7.23.6
- "@babel/helper-compilation-targets": ^7.23.6
- "@babel/helper-module-transforms": ^7.23.3
- "@babel/helpers": ^7.23.6
- "@babel/parser": ^7.23.6
- "@babel/template": ^7.22.15
- "@babel/traverse": ^7.23.6
- "@babel/types": ^7.23.6
- convert-source-map: ^2.0.0
- debug: ^4.1.0
- gensync: ^1.0.0-beta.2
- json5: ^2.2.3
- semver: ^6.3.1
- checksum: 4bddd1b80394a64b2ee33eeb216e8a2a49ad3d74f0ca9ba678c84a37f4502b2540662d72530d78228a2a349fda837fa852eea5cd3ae28465d1188acc6055868e
- languageName: node
- linkType: hard
-
-"@babel/generator@npm:^7.18.10, @babel/generator@npm:^7.23.6, @babel/generator@npm:^7.7.2":
- version: 7.23.6
- resolution: "@babel/generator@npm:7.23.6"
- dependencies:
- "@babel/types": ^7.23.6
- "@jridgewell/gen-mapping": ^0.3.2
- "@jridgewell/trace-mapping": ^0.3.17
- jsesc: ^2.5.1
- checksum: 1a1a1c4eac210f174cd108d479464d053930a812798e09fee069377de39a893422df5b5b146199ead7239ae6d3a04697b45fc9ac6e38e0f6b76374390f91fc6c
- languageName: node
- linkType: hard
-
-"@babel/helper-compilation-targets@npm:^7.23.6":
- version: 7.23.6
- resolution: "@babel/helper-compilation-targets@npm:7.23.6"
- dependencies:
- "@babel/compat-data": ^7.23.5
- "@babel/helper-validator-option": ^7.23.5
- browserslist: ^4.22.2
- lru-cache: ^5.1.1
- semver: ^6.3.1
- checksum: c630b98d4527ac8fe2c58d9a06e785dfb2b73ec71b7c4f2ddf90f814b5f75b547f3c015f110a010fd31f76e3864daaf09f3adcd2f6acdbfb18a8de3a48717590
- languageName: node
- linkType: hard
-
-"@babel/helper-environment-visitor@npm:^7.22.20":
- version: 7.22.20
- resolution: "@babel/helper-environment-visitor@npm:7.22.20"
- checksum: d80ee98ff66f41e233f36ca1921774c37e88a803b2f7dca3db7c057a5fea0473804db9fb6729e5dbfd07f4bed722d60f7852035c2c739382e84c335661590b69
- languageName: node
- linkType: hard
-
-"@babel/helper-function-name@npm:^7.23.0":
- version: 7.23.0
- resolution: "@babel/helper-function-name@npm:7.23.0"
- dependencies:
- "@babel/template": ^7.22.15
- "@babel/types": ^7.23.0
- checksum: e44542257b2d4634a1f979244eb2a4ad8e6d75eb6761b4cfceb56b562f7db150d134bc538c8e6adca3783e3bc31be949071527aa8e3aab7867d1ad2d84a26e10
- languageName: node
- linkType: hard
-
-"@babel/helper-hoist-variables@npm:^7.22.5":
- version: 7.22.5
- resolution: "@babel/helper-hoist-variables@npm:7.22.5"
- dependencies:
- "@babel/types": ^7.22.5
- checksum: 394ca191b4ac908a76e7c50ab52102669efe3a1c277033e49467913c7ed6f7c64d7eacbeabf3bed39ea1f41731e22993f763b1edce0f74ff8563fd1f380d92cc
- languageName: node
- linkType: hard
-
-"@babel/helper-module-imports@npm:^7.22.15":
- version: 7.22.15
- resolution: "@babel/helper-module-imports@npm:7.22.15"
- dependencies:
- "@babel/types": ^7.22.15
- checksum: ecd7e457df0a46f889228f943ef9b4a47d485d82e030676767e6a2fdcbdaa63594d8124d4b55fd160b41c201025aec01fc27580352b1c87a37c9c6f33d116702
- languageName: node
- linkType: hard
-
-"@babel/helper-module-transforms@npm:^7.23.3":
- version: 7.23.3
- resolution: "@babel/helper-module-transforms@npm:7.23.3"
- dependencies:
- "@babel/helper-environment-visitor": ^7.22.20
- "@babel/helper-module-imports": ^7.22.15
- "@babel/helper-simple-access": ^7.22.5
- "@babel/helper-split-export-declaration": ^7.22.6
- "@babel/helper-validator-identifier": ^7.22.20
- peerDependencies:
- "@babel/core": ^7.0.0
- checksum: 5d0895cfba0e16ae16f3aa92fee108517023ad89a855289c4eb1d46f7aef4519adf8e6f971e1d55ac20c5461610e17213f1144097a8f932e768a9132e2278d71
- languageName: node
- linkType: hard
-
-"@babel/helper-plugin-utils@npm:^7.0.0, @babel/helper-plugin-utils@npm:^7.10.4, @babel/helper-plugin-utils@npm:^7.12.13, @babel/helper-plugin-utils@npm:^7.14.5, @babel/helper-plugin-utils@npm:^7.22.5, @babel/helper-plugin-utils@npm:^7.8.0":
- version: 7.22.5
- resolution: "@babel/helper-plugin-utils@npm:7.22.5"
- checksum: c0fc7227076b6041acd2f0e818145d2e8c41968cc52fb5ca70eed48e21b8fe6dd88a0a91cbddf4951e33647336eb5ae184747ca706817ca3bef5e9e905151ff5
- languageName: node
- linkType: hard
-
-"@babel/helper-simple-access@npm:^7.22.5":
- version: 7.22.5
- resolution: "@babel/helper-simple-access@npm:7.22.5"
- dependencies:
- "@babel/types": ^7.22.5
- checksum: fe9686714caf7d70aedb46c3cce090f8b915b206e09225f1e4dbc416786c2fdbbee40b38b23c268b7ccef749dd2db35f255338fb4f2444429874d900dede5ad2
- languageName: node
- linkType: hard
-
-"@babel/helper-split-export-declaration@npm:^7.22.6":
- version: 7.22.6
- resolution: "@babel/helper-split-export-declaration@npm:7.22.6"
- dependencies:
- "@babel/types": ^7.22.5
- checksum: e141cace583b19d9195f9c2b8e17a3ae913b7ee9b8120246d0f9ca349ca6f03cb2c001fd5ec57488c544347c0bb584afec66c936511e447fd20a360e591ac921
- languageName: node
- linkType: hard
-
-"@babel/helper-string-parser@npm:^7.23.4":
- version: 7.23.4
- resolution: "@babel/helper-string-parser@npm:7.23.4"
- checksum: c0641144cf1a7e7dc93f3d5f16d5327465b6cf5d036b48be61ecba41e1eece161b48f46b7f960951b67f8c3533ce506b16dece576baef4d8b3b49f8c65410f90
- languageName: node
- linkType: hard
-
-"@babel/helper-validator-identifier@npm:^7.22.20":
- version: 7.22.20
- resolution: "@babel/helper-validator-identifier@npm:7.22.20"
- checksum: 136412784d9428266bcdd4d91c32bcf9ff0e8d25534a9d94b044f77fe76bc50f941a90319b05aafd1ec04f7d127cd57a179a3716009ff7f3412ef835ada95bdc
- languageName: node
- linkType: hard
-
-"@babel/helper-validator-option@npm:^7.23.5":
- version: 7.23.5
- resolution: "@babel/helper-validator-option@npm:7.23.5"
- checksum: 537cde2330a8aede223552510e8a13e9c1c8798afee3757995a7d4acae564124fe2bf7e7c3d90d62d3657434a74340a274b3b3b1c6f17e9a2be1f48af29cb09e
- languageName: node
- linkType: hard
-
-"@babel/helpers@npm:^7.23.6":
- version: 7.23.6
- resolution: "@babel/helpers@npm:7.23.6"
- dependencies:
- "@babel/template": ^7.22.15
- "@babel/traverse": ^7.23.6
- "@babel/types": ^7.23.6
- checksum: c5ba62497e1d717161d107c4b3de727565c68b6b9f50f59d6298e613afeca8895799b227c256e06d362e565aec34e26fb5c675b9c3d25055c52b945a21c21e21
- languageName: node
- linkType: hard
-
-"@babel/highlight@npm:^7.23.4":
- version: 7.23.4
- resolution: "@babel/highlight@npm:7.23.4"
- dependencies:
- "@babel/helper-validator-identifier": ^7.22.20
- chalk: ^2.4.2
- js-tokens: ^4.0.0
- checksum: 643acecdc235f87d925979a979b539a5d7d1f31ae7db8d89047269082694122d11aa85351304c9c978ceeb6d250591ccadb06c366f358ccee08bb9c122476b89
- languageName: node
- linkType: hard
-
-"@babel/parser@npm:^7.1.0, @babel/parser@npm:^7.10.5, @babel/parser@npm:^7.14.7, @babel/parser@npm:^7.18.11, @babel/parser@npm:^7.20.7, @babel/parser@npm:^7.22.15, @babel/parser@npm:^7.23.5, @babel/parser@npm:^7.23.6":
- version: 7.23.6
- resolution: "@babel/parser@npm:7.23.6"
- bin:
- parser: ./bin/babel-parser.js
- checksum: 140801c43731a6c41fd193f5c02bc71fd647a0360ca616b23d2db8be4b9739b9f951a03fc7c2db4f9b9214f4b27c1074db0f18bc3fa653783082d5af7c8860d5
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-async-generators@npm:^7.8.4":
- version: 7.8.4
- resolution: "@babel/plugin-syntax-async-generators@npm:7.8.4"
- dependencies:
- "@babel/helper-plugin-utils": ^7.8.0
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 7ed1c1d9b9e5b64ef028ea5e755c0be2d4e5e4e3d6cf7df757b9a8c4cfa4193d268176d0f1f7fbecdda6fe722885c7fda681f480f3741d8a2d26854736f05367
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-bigint@npm:^7.8.3":
- version: 7.8.3
- resolution: "@babel/plugin-syntax-bigint@npm:7.8.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.8.0
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 3a10849d83e47aec50f367a9e56a6b22d662ddce643334b087f9828f4c3dd73bdc5909aaeabe123fed78515767f9ca43498a0e621c438d1cd2802d7fae3c9648
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-class-properties@npm:^7.8.3":
- version: 7.12.13
- resolution: "@babel/plugin-syntax-class-properties@npm:7.12.13"
- dependencies:
- "@babel/helper-plugin-utils": ^7.12.13
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 24f34b196d6342f28d4bad303612d7ff566ab0a013ce89e775d98d6f832969462e7235f3e7eaf17678a533d4be0ba45d3ae34ab4e5a9dcbda5d98d49e5efa2fc
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-import-meta@npm:^7.8.3":
- version: 7.10.4
- resolution: "@babel/plugin-syntax-import-meta@npm:7.10.4"
- dependencies:
- "@babel/helper-plugin-utils": ^7.10.4
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 166ac1125d10b9c0c430e4156249a13858c0366d38844883d75d27389621ebe651115cb2ceb6dc011534d5055719fa1727b59f39e1ab3ca97820eef3dcab5b9b
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-json-strings@npm:^7.8.3":
- version: 7.8.3
- resolution: "@babel/plugin-syntax-json-strings@npm:7.8.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.8.0
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: bf5aea1f3188c9a507e16efe030efb996853ca3cadd6512c51db7233cc58f3ac89ff8c6bdfb01d30843b161cfe7d321e1bf28da82f7ab8d7e6bc5464666f354a
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-jsx@npm:^7.7.2":
- version: 7.23.3
- resolution: "@babel/plugin-syntax-jsx@npm:7.23.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.22.5
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 89037694314a74e7f0e7a9c8d3793af5bf6b23d80950c29b360db1c66859d67f60711ea437e70ad6b5b4b29affe17eababda841b6c01107c2b638e0493bafb4e
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-logical-assignment-operators@npm:^7.8.3":
- version: 7.10.4
- resolution: "@babel/plugin-syntax-logical-assignment-operators@npm:7.10.4"
- dependencies:
- "@babel/helper-plugin-utils": ^7.10.4
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: aff33577037e34e515911255cdbb1fd39efee33658aa00b8a5fd3a4b903585112d037cce1cc9e4632f0487dc554486106b79ccd5ea63a2e00df4363f6d4ff886
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-nullish-coalescing-operator@npm:^7.8.3":
- version: 7.8.3
- resolution: "@babel/plugin-syntax-nullish-coalescing-operator@npm:7.8.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.8.0
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 87aca4918916020d1fedba54c0e232de408df2644a425d153be368313fdde40d96088feed6c4e5ab72aac89be5d07fef2ddf329a15109c5eb65df006bf2580d1
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-numeric-separator@npm:^7.8.3":
- version: 7.10.4
- resolution: "@babel/plugin-syntax-numeric-separator@npm:7.10.4"
- dependencies:
- "@babel/helper-plugin-utils": ^7.10.4
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 01ec5547bd0497f76cc903ff4d6b02abc8c05f301c88d2622b6d834e33a5651aa7c7a3d80d8d57656a4588f7276eba357f6b7e006482f5b564b7a6488de493a1
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-object-rest-spread@npm:^7.8.3":
- version: 7.8.3
- resolution: "@babel/plugin-syntax-object-rest-spread@npm:7.8.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.8.0
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: fddcf581a57f77e80eb6b981b10658421bc321ba5f0a5b754118c6a92a5448f12a0c336f77b8abf734841e102e5126d69110a306eadb03ca3e1547cab31f5cbf
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-optional-catch-binding@npm:^7.8.3":
- version: 7.8.3
- resolution: "@babel/plugin-syntax-optional-catch-binding@npm:7.8.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.8.0
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: 910d90e72bc90ea1ce698e89c1027fed8845212d5ab588e35ef91f13b93143845f94e2539d831dc8d8ededc14ec02f04f7bd6a8179edd43a326c784e7ed7f0b9
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-optional-chaining@npm:^7.8.3":
- version: 7.8.3
- resolution: "@babel/plugin-syntax-optional-chaining@npm:7.8.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.8.0
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: eef94d53a1453361553c1f98b68d17782861a04a392840341bc91780838dd4e695209c783631cf0de14c635758beafb6a3a65399846ffa4386bff90639347f30
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-top-level-await@npm:^7.8.3":
- version: 7.14.5
- resolution: "@babel/plugin-syntax-top-level-await@npm:7.14.5"
- dependencies:
- "@babel/helper-plugin-utils": ^7.14.5
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: bbd1a56b095be7820029b209677b194db9b1d26691fe999856462e66b25b281f031f3dfd91b1619e9dcf95bebe336211833b854d0fb8780d618e35667c2d0d7e
- languageName: node
- linkType: hard
-
-"@babel/plugin-syntax-typescript@npm:^7.7.2":
- version: 7.23.3
- resolution: "@babel/plugin-syntax-typescript@npm:7.23.3"
- dependencies:
- "@babel/helper-plugin-utils": ^7.22.5
- peerDependencies:
- "@babel/core": ^7.0.0-0
- checksum: abfad3a19290d258b028e285a1f34c9b8a0cbe46ef79eafed4ed7ffce11b5d0720b5e536c82f91cbd8442cde35a3dd8e861fa70366d87ff06fdc0d4756e30876
- languageName: node
- linkType: hard
-
-"@babel/template@npm:^7.22.15, @babel/template@npm:^7.3.3":
- version: 7.22.15
- resolution: "@babel/template@npm:7.22.15"
- dependencies:
- "@babel/code-frame": ^7.22.13
- "@babel/parser": ^7.22.15
- "@babel/types": ^7.22.15
- checksum: 1f3e7dcd6c44f5904c184b3f7fe280394b191f2fed819919ffa1e529c259d5b197da8981b6ca491c235aee8dbad4a50b7e31304aa531271cb823a4a24a0dd8fd
- languageName: node
- linkType: hard
-
-"@babel/traverse@npm:^7.10.5, @babel/traverse@npm:^7.18.11, @babel/traverse@npm:^7.23.6":
- version: 7.23.6
- resolution: "@babel/traverse@npm:7.23.6"
- dependencies:
- "@babel/code-frame": ^7.23.5
- "@babel/generator": ^7.23.6
- "@babel/helper-environment-visitor": ^7.22.20
- "@babel/helper-function-name": ^7.23.0
- "@babel/helper-hoist-variables": ^7.22.5
- "@babel/helper-split-export-declaration": ^7.22.6
- "@babel/parser": ^7.23.6
- "@babel/types": ^7.23.6
- debug: ^4.3.1
- globals: ^11.1.0
- checksum: 48f2eac0e86b6cb60dab13a5ea6a26ba45c450262fccdffc334c01089e75935f7546be195e260e97f6e43cea419862eda095018531a2718fef8189153d479f88
- languageName: node
- linkType: hard
-
-"@babel/types@npm:^7.0.0, @babel/types@npm:^7.18.10, @babel/types@npm:^7.20.7, @babel/types@npm:^7.22.15, @babel/types@npm:^7.22.5, @babel/types@npm:^7.23.0, @babel/types@npm:^7.23.6, @babel/types@npm:^7.3.3":
- version: 7.23.6
- resolution: "@babel/types@npm:7.23.6"
- dependencies:
- "@babel/helper-string-parser": ^7.23.4
- "@babel/helper-validator-identifier": ^7.22.20
- to-fast-properties: ^2.0.0
- checksum: 68187dbec0d637f79bc96263ac95ec8b06d424396678e7e225492be866414ce28ebc918a75354d4c28659be6efe30020b4f0f6df81cc418a2d30645b690a8de0
- languageName: node
- linkType: hard
-
-"@babel/types@npm:^7.8.3":
- version: 7.23.9
- resolution: "@babel/types@npm:7.23.9"
- dependencies:
- "@babel/helper-string-parser": ^7.23.4
- "@babel/helper-validator-identifier": ^7.22.20
- to-fast-properties: ^2.0.0
- checksum: 0a9b008e9bfc89beb8c185e620fa0f8ed6c771f1e1b2e01e1596870969096fec7793898a1d64a035176abf1dd13e2668ee30bf699f2d92c210a8128f4b151e65
- languageName: node
- linkType: hard
-
-"@bcoe/v8-coverage@npm:^0.2.3":
- version: 0.2.3
- resolution: "@bcoe/v8-coverage@npm:0.2.3"
- checksum: 850f9305536d0f2bd13e9e0881cb5f02e4f93fad1189f7b2d4bebf694e3206924eadee1068130d43c11b750efcc9405f88a8e42ef098b6d75239c0f047de1a27
- languageName: node
- linkType: hard
-
-"@gar/promisify@npm:^1.1.3":
- version: 1.1.3
- resolution: "@gar/promisify@npm:1.1.3"
- checksum: 4059f790e2d07bf3c3ff3e0fec0daa8144fe35c1f6e0111c9921bd32106adaa97a4ab096ad7dab1e28ee6a9060083c4d1a4ada42a7f5f3f7a96b8812e2b757c1
- languageName: node
- linkType: hard
-
-"@isaacs/cliui@npm:^8.0.2":
- version: 8.0.2
- resolution: "@isaacs/cliui@npm:8.0.2"
- dependencies:
- string-width: ^5.1.2
- string-width-cjs: "npm:string-width@^4.2.0"
- strip-ansi: ^7.0.1
- strip-ansi-cjs: "npm:strip-ansi@^6.0.1"
- wrap-ansi: ^8.1.0
- wrap-ansi-cjs: "npm:wrap-ansi@^7.0.0"
- checksum: 4a473b9b32a7d4d3cfb7a614226e555091ff0c5a29a1734c28c72a182c2f6699b26fc6b5c2131dfd841e86b185aea714c72201d7c98c2fba5f17709333a67aeb
- languageName: node
- linkType: hard
-
-"@istanbuljs/load-nyc-config@npm:^1.0.0":
- version: 1.1.0
- resolution: "@istanbuljs/load-nyc-config@npm:1.1.0"
- dependencies:
- camelcase: ^5.3.1
- find-up: ^4.1.0
- get-package-type: ^0.1.0
- js-yaml: ^3.13.1
- resolve-from: ^5.0.0
- checksum: d578da5e2e804d5c93228450a1380e1a3c691de4953acc162f387b717258512a3e07b83510a936d9fab03eac90817473917e24f5d16297af3867f59328d58568
- languageName: node
- linkType: hard
-
-"@istanbuljs/schema@npm:^0.1.2":
- version: 0.1.3
- resolution: "@istanbuljs/schema@npm:0.1.3"
- checksum: 5282759d961d61350f33d9118d16bcaed914ebf8061a52f4fa474b2cb08720c9c81d165e13b82f2e5a8a212cc5af482f0c6fc1ac27b9e067e5394c9a6ed186c9
- languageName: node
- linkType: hard
-
-"@jest/console@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/console@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- "@types/node": "*"
- chalk: ^4.0.0
- jest-message-util: ^29.7.0
- jest-util: ^29.7.0
- slash: ^3.0.0
- checksum: 0e3624e32c5a8e7361e889db70b170876401b7d70f509a2538c31d5cd50deb0c1ae4b92dc63fe18a0902e0a48c590c21d53787a0df41a52b34fa7cab96c384d6
- languageName: node
- linkType: hard
-
-"@jest/core@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/core@npm:29.7.0"
- dependencies:
- "@jest/console": ^29.7.0
- "@jest/reporters": ^29.7.0
- "@jest/test-result": ^29.7.0
- "@jest/transform": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/node": "*"
- ansi-escapes: ^4.2.1
- chalk: ^4.0.0
- ci-info: ^3.2.0
- exit: ^0.1.2
- graceful-fs: ^4.2.9
- jest-changed-files: ^29.7.0
- jest-config: ^29.7.0
- jest-haste-map: ^29.7.0
- jest-message-util: ^29.7.0
- jest-regex-util: ^29.6.3
- jest-resolve: ^29.7.0
- jest-resolve-dependencies: ^29.7.0
- jest-runner: ^29.7.0
- jest-runtime: ^29.7.0
- jest-snapshot: ^29.7.0
- jest-util: ^29.7.0
- jest-validate: ^29.7.0
- jest-watcher: ^29.7.0
- micromatch: ^4.0.4
- pretty-format: ^29.7.0
- slash: ^3.0.0
- strip-ansi: ^6.0.0
- peerDependencies:
- node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0
- peerDependenciesMeta:
- node-notifier:
- optional: true
- checksum: af759c9781cfc914553320446ce4e47775ae42779e73621c438feb1e4231a5d4862f84b1d8565926f2d1aab29b3ec3dcfdc84db28608bdf5f29867124ebcfc0d
- languageName: node
- linkType: hard
-
-"@jest/environment@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/environment@npm:29.7.0"
- dependencies:
- "@jest/fake-timers": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/node": "*"
- jest-mock: ^29.7.0
- checksum: 6fb398143b2543d4b9b8d1c6dbce83fa5247f84f550330604be744e24c2bd2178bb893657d62d1b97cf2f24baf85c450223f8237cccb71192c36a38ea2272934
- languageName: node
- linkType: hard
-
-"@jest/expect-utils@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/expect-utils@npm:29.7.0"
- dependencies:
- jest-get-type: ^29.6.3
- checksum: 75eb177f3d00b6331bcaa057e07c0ccb0733a1d0a1943e1d8db346779039cb7f103789f16e502f888a3096fb58c2300c38d1f3748b36a7fa762eb6f6d1b160ed
- languageName: node
- linkType: hard
-
-"@jest/expect@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/expect@npm:29.7.0"
- dependencies:
- expect: ^29.7.0
- jest-snapshot: ^29.7.0
- checksum: a01cb85fd9401bab3370618f4b9013b90c93536562222d920e702a0b575d239d74cecfe98010aaec7ad464f67cf534a353d92d181646a4b792acaa7e912ae55e
- languageName: node
- linkType: hard
-
-"@jest/fake-timers@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/fake-timers@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- "@sinonjs/fake-timers": ^10.0.2
- "@types/node": "*"
- jest-message-util: ^29.7.0
- jest-mock: ^29.7.0
- jest-util: ^29.7.0
- checksum: caf2bbd11f71c9241b458d1b5a66cbe95debc5a15d96442444b5d5c7ba774f523c76627c6931cca5e10e76f0d08761f6f1f01a608898f4751a0eee54fc3d8d00
- languageName: node
- linkType: hard
-
-"@jest/globals@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/globals@npm:29.7.0"
- dependencies:
- "@jest/environment": ^29.7.0
- "@jest/expect": ^29.7.0
- "@jest/types": ^29.6.3
- jest-mock: ^29.7.0
- checksum: 97dbb9459135693ad3a422e65ca1c250f03d82b2a77f6207e7fa0edd2c9d2015fbe4346f3dc9ebff1678b9d8da74754d4d440b7837497f8927059c0642a22123
- languageName: node
- linkType: hard
-
-"@jest/reporters@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/reporters@npm:29.7.0"
- dependencies:
- "@bcoe/v8-coverage": ^0.2.3
- "@jest/console": ^29.7.0
- "@jest/test-result": ^29.7.0
- "@jest/transform": ^29.7.0
- "@jest/types": ^29.6.3
- "@jridgewell/trace-mapping": ^0.3.18
- "@types/node": "*"
- chalk: ^4.0.0
- collect-v8-coverage: ^1.0.0
- exit: ^0.1.2
- glob: ^7.1.3
- graceful-fs: ^4.2.9
- istanbul-lib-coverage: ^3.0.0
- istanbul-lib-instrument: ^6.0.0
- istanbul-lib-report: ^3.0.0
- istanbul-lib-source-maps: ^4.0.0
- istanbul-reports: ^3.1.3
- jest-message-util: ^29.7.0
- jest-util: ^29.7.0
- jest-worker: ^29.7.0
- slash: ^3.0.0
- string-length: ^4.0.1
- strip-ansi: ^6.0.0
- v8-to-istanbul: ^9.0.1
- peerDependencies:
- node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0
- peerDependenciesMeta:
- node-notifier:
- optional: true
- checksum: 7eadabd62cc344f629024b8a268ecc8367dba756152b761bdcb7b7e570a3864fc51b2a9810cd310d85e0a0173ef002ba4528d5ea0329fbf66ee2a3ada9c40455
- languageName: node
- linkType: hard
-
-"@jest/schemas@npm:^29.6.3":
- version: 29.6.3
- resolution: "@jest/schemas@npm:29.6.3"
- dependencies:
- "@sinclair/typebox": ^0.27.8
- checksum: 910040425f0fc93cd13e68c750b7885590b8839066dfa0cd78e7def07bbb708ad869381f725945d66f2284de5663bbecf63e8fdd856e2ae6e261ba30b1687e93
- languageName: node
- linkType: hard
-
-"@jest/source-map@npm:^29.6.3":
- version: 29.6.3
- resolution: "@jest/source-map@npm:29.6.3"
- dependencies:
- "@jridgewell/trace-mapping": ^0.3.18
- callsites: ^3.0.0
- graceful-fs: ^4.2.9
- checksum: bcc5a8697d471396c0003b0bfa09722c3cd879ad697eb9c431e6164e2ea7008238a01a07193dfe3cbb48b1d258eb7251f6efcea36f64e1ebc464ea3c03ae2deb
- languageName: node
- linkType: hard
-
-"@jest/test-result@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/test-result@npm:29.7.0"
- dependencies:
- "@jest/console": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/istanbul-lib-coverage": ^2.0.0
- collect-v8-coverage: ^1.0.0
- checksum: 67b6317d526e335212e5da0e768e3b8ab8a53df110361b80761353ad23b6aea4432b7c5665bdeb87658ea373b90fb1afe02ed3611ef6c858c7fba377505057fa
- languageName: node
- linkType: hard
-
-"@jest/test-sequencer@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/test-sequencer@npm:29.7.0"
- dependencies:
- "@jest/test-result": ^29.7.0
- graceful-fs: ^4.2.9
- jest-haste-map: ^29.7.0
- slash: ^3.0.0
- checksum: 73f43599017946be85c0b6357993b038f875b796e2f0950487a82f4ebcb115fa12131932dd9904026b4ad8be131fe6e28bd8d0aa93b1563705185f9804bff8bd
- languageName: node
- linkType: hard
-
-"@jest/transform@npm:^29.7.0":
- version: 29.7.0
- resolution: "@jest/transform@npm:29.7.0"
- dependencies:
- "@babel/core": ^7.11.6
- "@jest/types": ^29.6.3
- "@jridgewell/trace-mapping": ^0.3.18
- babel-plugin-istanbul: ^6.1.1
- chalk: ^4.0.0
- convert-source-map: ^2.0.0
- fast-json-stable-stringify: ^2.1.0
- graceful-fs: ^4.2.9
- jest-haste-map: ^29.7.0
- jest-regex-util: ^29.6.3
- jest-util: ^29.7.0
- micromatch: ^4.0.4
- pirates: ^4.0.4
- slash: ^3.0.0
- write-file-atomic: ^4.0.2
- checksum: 0f8ac9f413903b3cb6d240102db848f2a354f63971ab885833799a9964999dd51c388162106a807f810071f864302cdd8e3f0c241c29ce02d85a36f18f3f40ab
- languageName: node
- linkType: hard
-
-"@jest/types@npm:^29.6.3":
- version: 29.6.3
- resolution: "@jest/types@npm:29.6.3"
- dependencies:
- "@jest/schemas": ^29.6.3
- "@types/istanbul-lib-coverage": ^2.0.0
- "@types/istanbul-reports": ^3.0.0
- "@types/node": "*"
- "@types/yargs": ^17.0.8
- chalk: ^4.0.0
- checksum: a0bcf15dbb0eca6bdd8ce61a3fb055349d40268622a7670a3b2eb3c3dbafe9eb26af59938366d520b86907b9505b0f9b29b85cec11579a9e580694b87cd90fcc
- languageName: node
- linkType: hard
-
-"@jridgewell/gen-mapping@npm:^0.3.0, @jridgewell/gen-mapping@npm:^0.3.2":
- version: 0.3.3
- resolution: "@jridgewell/gen-mapping@npm:0.3.3"
- dependencies:
- "@jridgewell/set-array": ^1.0.1
- "@jridgewell/sourcemap-codec": ^1.4.10
- "@jridgewell/trace-mapping": ^0.3.9
- checksum: 4a74944bd31f22354fc01c3da32e83c19e519e3bbadafa114f6da4522ea77dd0c2842607e923a591d60a76699d819a2fbb6f3552e277efdb9b58b081390b60ab
- languageName: node
- linkType: hard
-
-"@jridgewell/resolve-uri@npm:^3.1.0":
- version: 3.1.1
- resolution: "@jridgewell/resolve-uri@npm:3.1.1"
- checksum: f5b441fe7900eab4f9155b3b93f9800a916257f4e8563afbcd3b5a5337b55e52bd8ae6735453b1b745457d9f6cdb16d74cd6220bbdd98cf153239e13f6cbb653
- languageName: node
- linkType: hard
-
-"@jridgewell/set-array@npm:^1.0.1":
- version: 1.1.2
- resolution: "@jridgewell/set-array@npm:1.1.2"
- checksum: 69a84d5980385f396ff60a175f7177af0b8da4ddb81824cb7016a9ef914eee9806c72b6b65942003c63f7983d4f39a5c6c27185bbca88eb4690b62075602e28e
- languageName: node
- linkType: hard
-
-"@jridgewell/sourcemap-codec@npm:^1.4.10, @jridgewell/sourcemap-codec@npm:^1.4.14, @jridgewell/sourcemap-codec@npm:^1.4.15":
- version: 1.4.15
- resolution: "@jridgewell/sourcemap-codec@npm:1.4.15"
- checksum: b881c7e503db3fc7f3c1f35a1dd2655a188cc51a3612d76efc8a6eb74728bef5606e6758ee77423e564092b4a518aba569bbb21c9bac5ab7a35b0c6ae7e344c8
- languageName: node
- linkType: hard
-
-"@jridgewell/trace-mapping@npm:^0.3.12, @jridgewell/trace-mapping@npm:^0.3.17, @jridgewell/trace-mapping@npm:^0.3.18, @jridgewell/trace-mapping@npm:^0.3.9":
- version: 0.3.20
- resolution: "@jridgewell/trace-mapping@npm:0.3.20"
- dependencies:
- "@jridgewell/resolve-uri": ^3.1.0
- "@jridgewell/sourcemap-codec": ^1.4.14
- checksum: cd1a7353135f385909468ff0cf20bdd37e59f2ee49a13a966dedf921943e222082c583ade2b579ff6cd0d8faafcb5461f253e1bf2a9f48fec439211fdbe788f5
- languageName: node
- linkType: hard
-
-"@npmcli/agent@npm:^2.0.0":
- version: 2.2.0
- resolution: "@npmcli/agent@npm:2.2.0"
- dependencies:
- agent-base: ^7.1.0
- http-proxy-agent: ^7.0.0
- https-proxy-agent: ^7.0.1
- lru-cache: ^10.0.1
- socks-proxy-agent: ^8.0.1
- checksum: 3b25312edbdfaa4089af28e2d423b6f19838b945e47765b0c8174c1395c79d43c3ad6d23cb364b43f59fd3acb02c93e3b493f72ddbe3dfea04c86843a7311fc4
- languageName: node
- linkType: hard
-
-"@npmcli/fs@npm:^2.1.0":
- version: 2.1.2
- resolution: "@npmcli/fs@npm:2.1.2"
- dependencies:
- "@gar/promisify": ^1.1.3
- semver: ^7.3.5
- checksum: 405074965e72d4c9d728931b64d2d38e6ea12066d4fad651ac253d175e413c06fe4350970c783db0d749181da8fe49c42d3880bd1cbc12cd68e3a7964d820225
- languageName: node
- linkType: hard
-
-"@npmcli/fs@npm:^3.1.0":
- version: 3.1.0
- resolution: "@npmcli/fs@npm:3.1.0"
- dependencies:
- semver: ^7.3.5
- checksum: a50a6818de5fc557d0b0e6f50ec780a7a02ab8ad07e5ac8b16bf519e0ad60a144ac64f97d05c443c3367235d337182e1d012bbac0eb8dbae8dc7b40b193efd0e
- languageName: node
- linkType: hard
-
-"@npmcli/move-file@npm:^2.0.0":
- version: 2.0.1
- resolution: "@npmcli/move-file@npm:2.0.1"
- dependencies:
- mkdirp: ^1.0.4
- rimraf: ^3.0.2
- checksum: 52dc02259d98da517fae4cb3a0a3850227bdae4939dda1980b788a7670636ca2b4a01b58df03dd5f65c1e3cb70c50fa8ce5762b582b3f499ec30ee5ce1fd9380
- languageName: node
- linkType: hard
-
-"@pkgjs/parseargs@npm:^0.11.0":
- version: 0.11.0
- resolution: "@pkgjs/parseargs@npm:0.11.0"
- checksum: 6ad6a00fc4f2f2cfc6bff76fb1d88b8ee20bc0601e18ebb01b6d4be583733a860239a521a7fbca73b612e66705078809483549d2b18f370eb346c5155c8e4a0f
- languageName: node
- linkType: hard
-
-"@sinclair/typebox@npm:^0.27.8":
- version: 0.27.8
- resolution: "@sinclair/typebox@npm:0.27.8"
- checksum: 00bd7362a3439021aa1ea51b0e0d0a0e8ca1351a3d54c606b115fdcc49b51b16db6e5f43b4fe7a28c38688523e22a94d49dd31168868b655f0d4d50f032d07a1
- languageName: node
- linkType: hard
-
-"@sinonjs/commons@npm:^3.0.0":
- version: 3.0.0
- resolution: "@sinonjs/commons@npm:3.0.0"
- dependencies:
- type-detect: 4.0.8
- checksum: b4b5b73d4df4560fb8c0c7b38c7ad4aeabedd362f3373859d804c988c725889cde33550e4bcc7cd316a30f5152a2d1d43db71b6d0c38f5feef71fd8d016763f8
- languageName: node
- linkType: hard
-
-"@sinonjs/fake-timers@npm:^10.0.2":
- version: 10.3.0
- resolution: "@sinonjs/fake-timers@npm:10.3.0"
- dependencies:
- "@sinonjs/commons": ^3.0.0
- checksum: 614d30cb4d5201550c940945d44c9e0b6d64a888ff2cd5b357f95ad6721070d6b8839cd10e15b76bf5e14af0bcc1d8f9ec00d49a46318f1f669a4bec1d7f3148
- languageName: node
- linkType: hard
-
-"@tootallnate/once@npm:2":
- version: 2.0.0
- resolution: "@tootallnate/once@npm:2.0.0"
- checksum: ad87447820dd3f24825d2d947ebc03072b20a42bfc96cbafec16bff8bbda6c1a81fcb0be56d5b21968560c5359a0af4038a68ba150c3e1694fe4c109a063bed8
- languageName: node
- linkType: hard
-
-"@types/babel__core@npm:^7.1.14":
- version: 7.20.5
- resolution: "@types/babel__core@npm:7.20.5"
- dependencies:
- "@babel/parser": ^7.20.7
- "@babel/types": ^7.20.7
- "@types/babel__generator": "*"
- "@types/babel__template": "*"
- "@types/babel__traverse": "*"
- checksum: a3226f7930b635ee7a5e72c8d51a357e799d19cbf9d445710fa39ab13804f79ab1a54b72ea7d8e504659c7dfc50675db974b526142c754398d7413aa4bc30845
- languageName: node
- linkType: hard
-
-"@types/babel__generator@npm:*":
- version: 7.6.8
- resolution: "@types/babel__generator@npm:7.6.8"
- dependencies:
- "@babel/types": ^7.0.0
- checksum: 5b332ea336a2efffbdeedb92b6781949b73498606ddd4205462f7d96dafd45ff3618770b41de04c4881e333dd84388bfb8afbdf6f2764cbd98be550d85c6bb48
- languageName: node
- linkType: hard
-
-"@types/babel__template@npm:*":
- version: 7.4.4
- resolution: "@types/babel__template@npm:7.4.4"
- dependencies:
- "@babel/parser": ^7.1.0
- "@babel/types": ^7.0.0
- checksum: d7a02d2a9b67e822694d8e6a7ddb8f2b71a1d6962dfd266554d2513eefbb205b33ca71a0d163b1caea3981ccf849211f9964d8bd0727124d18ace45aa6c9ae29
- languageName: node
- linkType: hard
-
-"@types/babel__traverse@npm:*, @types/babel__traverse@npm:^7.0.6":
- version: 7.20.4
- resolution: "@types/babel__traverse@npm:7.20.4"
- dependencies:
- "@babel/types": ^7.20.7
- checksum: f044ba80e00d07e46ee917c44f96cfc268fcf6d3871f7dfb8db8d3c6dab1508302f3e6bc508352a4a3ae627d2522e3fc500fa55907e0410a08e2e0902a8f3576
- languageName: node
- linkType: hard
-
-"@types/debug@npm:^4.0.0":
- version: 4.1.12
- resolution: "@types/debug@npm:4.1.12"
- dependencies:
- "@types/ms": "*"
- checksum: 47876a852de8240bfdaf7481357af2b88cb660d30c72e73789abf00c499d6bc7cd5e52f41c915d1b9cd8ec9fef5b05688d7b7aef17f7f272c2d04679508d1053
- languageName: node
- linkType: hard
-
-"@types/extend@npm:^3.0.0":
- version: 3.0.4
- resolution: "@types/extend@npm:3.0.4"
- checksum: a3c91b255e883a7e3de83ab71090afd3db96d09a598300adf5ff0b990315486a92ee8447cbaefb21fb21b6269bf19a0e3651054a50481f726e52d4904e2ba25c
- languageName: node
- linkType: hard
-
-"@types/graceful-fs@npm:^4.1.3":
- version: 4.1.9
- resolution: "@types/graceful-fs@npm:4.1.9"
- dependencies:
- "@types/node": "*"
- checksum: 79d746a8f053954bba36bd3d94a90c78de995d126289d656fb3271dd9f1229d33f678da04d10bce6be440494a5a73438e2e363e92802d16b8315b051036c5256
- languageName: node
- linkType: hard
-
-"@types/hast@npm:^2.0.0":
- version: 2.3.9
- resolution: "@types/hast@npm:2.3.9"
- dependencies:
- "@types/unist": ^2
- checksum: 32a742021a973b1e23399f09a21325fda89bf55486068ef7c6364f5054b991cc8ab007f1134cc9d6c7030b6ed60633d70f7401dffb3dec8d10c997330d458a3f
- languageName: node
- linkType: hard
-
-"@types/istanbul-lib-coverage@npm:*, @types/istanbul-lib-coverage@npm:^2.0.0, @types/istanbul-lib-coverage@npm:^2.0.1":
- version: 2.0.6
- resolution: "@types/istanbul-lib-coverage@npm:2.0.6"
- checksum: 3feac423fd3e5449485afac999dcfcb3d44a37c830af898b689fadc65d26526460bedb889db278e0d4d815a670331796494d073a10ee6e3a6526301fe7415778
- languageName: node
- linkType: hard
-
-"@types/istanbul-lib-report@npm:*":
- version: 3.0.3
- resolution: "@types/istanbul-lib-report@npm:3.0.3"
- dependencies:
- "@types/istanbul-lib-coverage": "*"
- checksum: b91e9b60f865ff08cb35667a427b70f6c2c63e88105eadd29a112582942af47ed99c60610180aa8dcc22382fa405033f141c119c69b95db78c4c709fbadfeeb4
- languageName: node
- linkType: hard
-
-"@types/istanbul-reports@npm:^3.0.0":
- version: 3.0.4
- resolution: "@types/istanbul-reports@npm:3.0.4"
- dependencies:
- "@types/istanbul-lib-report": "*"
- checksum: 93eb18835770b3431f68ae9ac1ca91741ab85f7606f310a34b3586b5a34450ec038c3eed7ab19266635499594de52ff73723a54a72a75b9f7d6a956f01edee95
- languageName: node
- linkType: hard
-
-"@types/mdast@npm:^3.0.0":
- version: 3.0.15
- resolution: "@types/mdast@npm:3.0.15"
- dependencies:
- "@types/unist": ^2
- checksum: af85042a4e3af3f879bde4059fa9e76c71cb552dffc896cdcc6cf9dc1fd38e37035c2dbd6245cfa6535b433f1f0478f5549696234ccace47a64055a10c656530
- languageName: node
- linkType: hard
-
-"@types/ms@npm:*":
- version: 0.7.34
- resolution: "@types/ms@npm:0.7.34"
- checksum: f38d36e7b6edecd9badc9cf50474159e9da5fa6965a75186cceaf883278611b9df6669dc3a3cc122b7938d317b68a9e3d573d316fcb35d1be47ec9e468c6bd8a
- languageName: node
- linkType: hard
-
-"@types/node@npm:*, @types/node@npm:^20.1.5":
- version: 20.10.5
- resolution: "@types/node@npm:20.10.5"
- dependencies:
- undici-types: ~5.26.4
- checksum: e216b679f545a8356960ce985a0e53c3a58fff0eacd855e180b9e223b8db2b5bd07b744a002b8c1f0c37f9194648ab4578533b5c12df2ec10cc02f61d20948d2
- languageName: node
- linkType: hard
-
-"@types/normalize-package-data@npm:^2.4.1":
- version: 2.4.4
- resolution: "@types/normalize-package-data@npm:2.4.4"
- checksum: 65dff72b543997b7be8b0265eca7ace0e34b75c3e5fee31de11179d08fa7124a7a5587265d53d0409532ecb7f7fba662c2012807963e1f9b059653ec2c83ee05
- languageName: node
- linkType: hard
-
-"@types/parse5@npm:^6.0.0":
- version: 6.0.3
- resolution: "@types/parse5@npm:6.0.3"
- checksum: ddb59ee4144af5dfcc508a8dcf32f37879d11e12559561e65788756b95b33e6f03ea027d88e1f5408f9b7bfb656bf630ace31a2169edf44151daaf8dd58df1b7
- languageName: node
- linkType: hard
-
-"@types/stack-utils@npm:^2.0.0":
- version: 2.0.3
- resolution: "@types/stack-utils@npm:2.0.3"
- checksum: 72576cc1522090fe497337c2b99d9838e320659ac57fa5560fcbdcbafcf5d0216c6b3a0a8a4ee4fdb3b1f5e3420aa4f6223ab57b82fef3578bec3206425c6cf5
- languageName: node
- linkType: hard
-
-"@types/supports-color@npm:^8.0.0":
- version: 8.1.3
- resolution: "@types/supports-color@npm:8.1.3"
- checksum: f5a3ca4aa94ac9d45beae8aa06dcba45e6d56b77999707a2708b54a9b042f84c68e619b10ef6e4b6f447f801824adebb9ed4d7a82c0b5d5d7bf29d5ff34d53a9
- languageName: node
- linkType: hard
-
-"@types/unist@npm:^2, @types/unist@npm:^2.0.0":
- version: 2.0.10
- resolution: "@types/unist@npm:2.0.10"
- checksum: e2924e18dedf45f68a5c6ccd6015cd62f1643b1b43baac1854efa21ae9e70505db94290434a23da1137d9e31eb58e54ca175982005698ac37300a1c889f6c4aa
- languageName: node
- linkType: hard
-
-"@types/yargs-parser@npm:*":
- version: 21.0.3
- resolution: "@types/yargs-parser@npm:21.0.3"
- checksum: ef236c27f9432983e91432d974243e6c4cdae227cb673740320eff32d04d853eed59c92ca6f1142a335cfdc0e17cccafa62e95886a8154ca8891cc2dec4ee6fc
- languageName: node
- linkType: hard
-
-"@types/yargs@npm:^17.0.8":
- version: 17.0.32
- resolution: "@types/yargs@npm:17.0.32"
- dependencies:
- "@types/yargs-parser": "*"
- checksum: 4505bdebe8716ff383640c6e928f855b5d337cb3c68c81f7249fc6b983d0aa48de3eee26062b84f37e0d75a5797bc745e0c6e76f42f81771252a758c638f36ba
- languageName: node
- linkType: hard
-
-"@vue/compiler-core@npm:3.3.13":
- version: 3.3.13
- resolution: "@vue/compiler-core@npm:3.3.13"
- dependencies:
- "@babel/parser": ^7.23.5
- "@vue/shared": 3.3.13
- estree-walker: ^2.0.2
- source-map-js: ^1.0.2
- checksum: e758146d7805199b60166a3bc8f6f8a4fa8bedf814f5c33a77776fc46585e14f36c11c2f96fa54c8b895e3115716db613c557e705ccadd306c424a7d39585bf8
- languageName: node
- linkType: hard
-
-"@vue/compiler-dom@npm:3.3.13":
- version: 3.3.13
- resolution: "@vue/compiler-dom@npm:3.3.13"
- dependencies:
- "@vue/compiler-core": 3.3.13
- "@vue/shared": 3.3.13
- checksum: 8165ae90d827ba7b0d89382bc404fc52edf40c2fa8f4a54fb4bdd4ed730f33283ccf359795823f6a3c2f31d538a10b828f38dd32d512ebdcc329ebd78ad7a325
- languageName: node
- linkType: hard
-
-"@vue/compiler-sfc@npm:^3.2.37":
- version: 3.3.13
- resolution: "@vue/compiler-sfc@npm:3.3.13"
- dependencies:
- "@babel/parser": ^7.23.5
- "@vue/compiler-core": 3.3.13
- "@vue/compiler-dom": 3.3.13
- "@vue/compiler-ssr": 3.3.13
- "@vue/reactivity-transform": 3.3.13
- "@vue/shared": 3.3.13
- estree-walker: ^2.0.2
- magic-string: ^0.30.5
- postcss: ^8.4.32
- source-map-js: ^1.0.2
- checksum: f78a9a01472a8effed49f524729c762588e38bcaed50a677e099fe1ddc66e3a9fb56a2f1f85020010a44e385e9ef01872d41b540eded28a52c054b887b77b488
- languageName: node
- linkType: hard
-
-"@vue/compiler-ssr@npm:3.3.13":
- version: 3.3.13
- resolution: "@vue/compiler-ssr@npm:3.3.13"
- dependencies:
- "@vue/compiler-dom": 3.3.13
- "@vue/shared": 3.3.13
- checksum: 02f3999467a6290af8a77c112462b23faf34399e55f7ef51b0b9985d75363a1ffcfb915201cb8a5890d50ede75af52356e32d9b950f1be16532842e670fd6b60
- languageName: node
- linkType: hard
-
-"@vue/reactivity-transform@npm:3.3.13":
- version: 3.3.13
- resolution: "@vue/reactivity-transform@npm:3.3.13"
- dependencies:
- "@babel/parser": ^7.23.5
- "@vue/compiler-core": 3.3.13
- "@vue/shared": 3.3.13
- estree-walker: ^2.0.2
- magic-string: ^0.30.5
- checksum: 6abe723dd33302635377acffd9a29121e48dea3dba720eefb26481fc78bfd5e4070674a5871c72c27905a4e01b3e8c27b73060c828a27f1e623ce2913f9a3af3
- languageName: node
- linkType: hard
-
-"@vue/shared@npm:3.3.13":
- version: 3.3.13
- resolution: "@vue/shared@npm:3.3.13"
- checksum: 3f57cd2f4611962aa1a4e9a5926f160f200f1769015240ad02df0245fe25bd2303b88957f35a13fbb0bf8d3e5a649fd41f44f347a0cf29e84e8f41d70123f7bf
- languageName: node
- linkType: hard
-
-"abbrev@npm:^1.0.0":
- version: 1.1.1
- resolution: "abbrev@npm:1.1.1"
- checksum: a4a97ec07d7ea112c517036882b2ac22f3109b7b19077dc656316d07d308438aac28e4d9746dc4d84bf6b1e75b4a7b0a5f3cb30592419f128ca9a8cee3bcfa17
- languageName: node
- linkType: hard
-
-"abbrev@npm:^2.0.0":
- version: 2.0.0
- resolution: "abbrev@npm:2.0.0"
- checksum: 0e994ad2aa6575f94670d8a2149afe94465de9cedaaaac364e7fb43a40c3691c980ff74899f682f4ca58fa96b4cbd7421a015d3a6defe43a442117d7821a2f36
- languageName: node
- linkType: hard
-
-"agent-base@npm:6, agent-base@npm:^6.0.2":
- version: 6.0.2
- resolution: "agent-base@npm:6.0.2"
- dependencies:
- debug: 4
- checksum: f52b6872cc96fd5f622071b71ef200e01c7c4c454ee68bc9accca90c98cfb39f2810e3e9aa330435835eedc8c23f4f8a15267f67c6e245d2b33757575bdac49d
- languageName: node
- linkType: hard
-
-"agent-base@npm:^7.0.2, agent-base@npm:^7.1.0":
- version: 7.1.0
- resolution: "agent-base@npm:7.1.0"
- dependencies:
- debug: ^4.3.4
- checksum: f7828f991470a0cc22cb579c86a18cbae83d8a3cbed39992ab34fc7217c4d126017f1c74d0ab66be87f71455318a8ea3e757d6a37881b8d0f2a2c6aa55e5418f
- languageName: node
- linkType: hard
-
-"agentkeepalive@npm:^4.2.1":
- version: 4.5.0
- resolution: "agentkeepalive@npm:4.5.0"
- dependencies:
- humanize-ms: ^1.2.1
- checksum: 13278cd5b125e51eddd5079f04d6fe0914ac1b8b91c1f3db2c1822f99ac1a7457869068997784342fe455d59daaff22e14fb7b8c3da4e741896e7e31faf92481
- languageName: node
- linkType: hard
-
-"aggregate-error@npm:^3.0.0":
- version: 3.1.0
- resolution: "aggregate-error@npm:3.1.0"
- dependencies:
- clean-stack: ^2.0.0
- indent-string: ^4.0.0
- checksum: 1101a33f21baa27a2fa8e04b698271e64616b886795fd43c31068c07533c7b3facfcaf4e9e0cab3624bd88f729a592f1c901a1a229c9e490eafce411a8644b79
- languageName: node
- linkType: hard
-
-"ansi-escapes@npm:^4.2.1":
- version: 4.3.2
- resolution: "ansi-escapes@npm:4.3.2"
- dependencies:
- type-fest: ^0.21.3
- checksum: 93111c42189c0a6bed9cdb4d7f2829548e943827ee8479c74d6e0b22ee127b2a21d3f8b5ca57723b8ef78ce011fbfc2784350eb2bde3ccfccf2f575fa8489815
- languageName: node
- linkType: hard
-
-"ansi-regex@npm:^5.0.1":
- version: 5.0.1
- resolution: "ansi-regex@npm:5.0.1"
- checksum: 2aa4bb54caf2d622f1afdad09441695af2a83aa3fe8b8afa581d205e57ed4261c183c4d3877cee25794443fde5876417d859c108078ab788d6af7e4fe52eb66b
- languageName: node
- linkType: hard
-
-"ansi-regex@npm:^6.0.1":
- version: 6.0.1
- resolution: "ansi-regex@npm:6.0.1"
- checksum: 1ff8b7667cded1de4fa2c9ae283e979fc87036864317da86a2e546725f96406746411d0d85e87a2d12fa5abd715d90006de7fa4fa0477c92321ad3b4c7d4e169
- languageName: node
- linkType: hard
-
-"ansi-styles@npm:^3.2.1":
- version: 3.2.1
- resolution: "ansi-styles@npm:3.2.1"
- dependencies:
- color-convert: ^1.9.0
- checksum: d85ade01c10e5dd77b6c89f34ed7531da5830d2cb5882c645f330079975b716438cd7ebb81d0d6e6b4f9c577f19ae41ab55f07f19786b02f9dfd9e0377395665
- languageName: node
- linkType: hard
-
-"ansi-styles@npm:^4.0.0, ansi-styles@npm:^4.1.0":
- version: 4.3.0
- resolution: "ansi-styles@npm:4.3.0"
- dependencies:
- color-convert: ^2.0.1
- checksum: 513b44c3b2105dd14cc42a19271e80f386466c4be574bccf60b627432f9198571ebf4ab1e4c3ba17347658f4ee1711c163d574248c0c1cdc2d5917a0ad582ec4
- languageName: node
- linkType: hard
-
-"ansi-styles@npm:^5.0.0":
- version: 5.2.0
- resolution: "ansi-styles@npm:5.2.0"
- checksum: d7f4e97ce0623aea6bc0d90dcd28881ee04cba06c570b97fd3391bd7a268eedfd9d5e2dd4fdcbdd82b8105df5faf6f24aaedc08eaf3da898e702db5948f63469
- languageName: node
- linkType: hard
-
-"ansi-styles@npm:^6.1.0":
- version: 6.2.1
- resolution: "ansi-styles@npm:6.2.1"
- checksum: ef940f2f0ced1a6347398da88a91da7930c33ecac3c77b72c5905f8b8fe402c52e6fde304ff5347f616e27a742da3f1dc76de98f6866c69251ad0b07a66776d9
- languageName: node
- linkType: hard
-
-"anymatch@npm:^3.0.3, anymatch@npm:~3.1.2":
- version: 3.1.3
- resolution: "anymatch@npm:3.1.3"
- dependencies:
- normalize-path: ^3.0.0
- picomatch: ^2.0.4
- checksum: 3e044fd6d1d26545f235a9fe4d7a534e2029d8e59fa7fd9f2a6eb21230f6b5380ea1eaf55136e60cbf8e613544b3b766e7a6fa2102e2a3a117505466e3025dc2
- languageName: node
- linkType: hard
-
-"aproba@npm:^1.0.3 || ^2.0.0":
- version: 2.0.0
- resolution: "aproba@npm:2.0.0"
- checksum: 5615cadcfb45289eea63f8afd064ab656006361020e1735112e346593856f87435e02d8dcc7ff0d11928bc7d425f27bc7c2a84f6c0b35ab0ff659c814c138a24
- languageName: node
- linkType: hard
-
-"are-we-there-yet@npm:^3.0.0":
- version: 3.0.1
- resolution: "are-we-there-yet@npm:3.0.1"
- dependencies:
- delegates: ^1.0.0
- readable-stream: ^3.6.0
- checksum: 52590c24860fa7173bedeb69a4c05fb573473e860197f618b9a28432ee4379049336727ae3a1f9c4cb083114601c1140cee578376164d0e651217a9843f9fe83
- languageName: node
- linkType: hard
-
-"argparse@npm:^1.0.7":
- version: 1.0.10
- resolution: "argparse@npm:1.0.10"
- dependencies:
- sprintf-js: ~1.0.2
- checksum: 7ca6e45583a28de7258e39e13d81e925cfa25d7d4aacbf806a382d3c02fcb13403a07fb8aeef949f10a7cfe4a62da0e2e807b348a5980554cc28ee573ef95945
- languageName: node
- linkType: hard
-
-"argparse@npm:^2.0.1":
- version: 2.0.1
- resolution: "argparse@npm:2.0.1"
- checksum: 83644b56493e89a254bae05702abf3a1101b4fa4d0ca31df1c9985275a5a5bd47b3c27b7fa0b71098d41114d8ca000e6ed90cad764b306f8a503665e4d517ced
- languageName: node
- linkType: hard
-
-"babel-jest@npm:^29.7.0":
- version: 29.7.0
- resolution: "babel-jest@npm:29.7.0"
- dependencies:
- "@jest/transform": ^29.7.0
- "@types/babel__core": ^7.1.14
- babel-plugin-istanbul: ^6.1.1
- babel-preset-jest: ^29.6.3
- chalk: ^4.0.0
- graceful-fs: ^4.2.9
- slash: ^3.0.0
- peerDependencies:
- "@babel/core": ^7.8.0
- checksum: ee6f8e0495afee07cac5e4ee167be705c711a8cc8a737e05a587a131fdae2b3c8f9aa55dfd4d9c03009ac2d27f2de63d8ba96d3e8460da4d00e8af19ef9a83f7
- languageName: node
- linkType: hard
-
-"babel-plugin-istanbul@npm:^6.1.1":
- version: 6.1.1
- resolution: "babel-plugin-istanbul@npm:6.1.1"
- dependencies:
- "@babel/helper-plugin-utils": ^7.0.0
- "@istanbuljs/load-nyc-config": ^1.0.0
- "@istanbuljs/schema": ^0.1.2
- istanbul-lib-instrument: ^5.0.4
- test-exclude: ^6.0.0
- checksum: cb4fd95738219f232f0aece1116628cccff16db891713c4ccb501cddbbf9272951a5df81f2f2658dfdf4b3e7b236a9d5cbcf04d5d8c07dd5077297339598061a
- languageName: node
- linkType: hard
-
-"babel-plugin-jest-hoist@npm:^29.6.3":
- version: 29.6.3
- resolution: "babel-plugin-jest-hoist@npm:29.6.3"
- dependencies:
- "@babel/template": ^7.3.3
- "@babel/types": ^7.3.3
- "@types/babel__core": ^7.1.14
- "@types/babel__traverse": ^7.0.6
- checksum: 51250f22815a7318f17214a9d44650ba89551e6d4f47a2dc259128428324b52f5a73979d010cefd921fd5a720d8c1d55ad74ff601cd94c7bd44d5f6292fde2d1
- languageName: node
- linkType: hard
-
-"babel-preset-current-node-syntax@npm:^1.0.0":
- version: 1.0.1
- resolution: "babel-preset-current-node-syntax@npm:1.0.1"
- dependencies:
- "@babel/plugin-syntax-async-generators": ^7.8.4
- "@babel/plugin-syntax-bigint": ^7.8.3
- "@babel/plugin-syntax-class-properties": ^7.8.3
- "@babel/plugin-syntax-import-meta": ^7.8.3
- "@babel/plugin-syntax-json-strings": ^7.8.3
- "@babel/plugin-syntax-logical-assignment-operators": ^7.8.3
- "@babel/plugin-syntax-nullish-coalescing-operator": ^7.8.3
- "@babel/plugin-syntax-numeric-separator": ^7.8.3
- "@babel/plugin-syntax-object-rest-spread": ^7.8.3
- "@babel/plugin-syntax-optional-catch-binding": ^7.8.3
- "@babel/plugin-syntax-optional-chaining": ^7.8.3
- "@babel/plugin-syntax-top-level-await": ^7.8.3
- peerDependencies:
- "@babel/core": ^7.0.0
- checksum: d118c2742498c5492c095bc8541f4076b253e705b5f1ad9a2e7d302d81a84866f0070346662355c8e25fc02caa28dc2da8d69bcd67794a0d60c4d6fab6913cc8
- languageName: node
- linkType: hard
-
-"babel-preset-jest@npm:^29.6.3":
- version: 29.6.3
- resolution: "babel-preset-jest@npm:29.6.3"
- dependencies:
- babel-plugin-jest-hoist: ^29.6.3
- babel-preset-current-node-syntax: ^1.0.0
- peerDependencies:
- "@babel/core": ^7.0.0
- checksum: aa4ff2a8a728d9d698ed521e3461a109a1e66202b13d3494e41eea30729a5e7cc03b3a2d56c594423a135429c37bf63a9fa8b0b9ce275298be3095a88c69f6fb
- languageName: node
- linkType: hard
-
-"bail@npm:^2.0.0":
- version: 2.0.2
- resolution: "bail@npm:2.0.2"
- checksum: aab4e8ccdc8d762bf3fdfce8e706601695620c0c2eda256dd85088dc0be3cfd7ff126f6e99c2bee1f24f5d418414aacf09d7f9702f16d6963df2fa488cda8824
- languageName: node
- linkType: hard
-
-"balanced-match@npm:^1.0.0":
- version: 1.0.2
- resolution: "balanced-match@npm:1.0.2"
- checksum: 9706c088a283058a8a99e0bf91b0a2f75497f185980d9ffa8b304de1d9e58ebda7c72c07ebf01dadedaac5b2907b2c6f566f660d62bd336c3468e960403b9d65
- languageName: node
- linkType: hard
-
-"base64-js@npm:^1.3.1":
- version: 1.5.1
- resolution: "base64-js@npm:1.5.1"
- checksum: 669632eb3745404c2f822a18fc3a0122d2f9a7a13f7fb8b5823ee19d1d2ff9ee5b52c53367176ea4ad093c332fd5ab4bd0ebae5a8e27917a4105a4cfc86b1005
- languageName: node
- linkType: hard
-
-"binary-extensions@npm:^2.0.0":
- version: 2.2.0
- resolution: "binary-extensions@npm:2.2.0"
- checksum: ccd267956c58d2315f5d3ea6757cf09863c5fc703e50fbeb13a7dc849b812ef76e3cf9ca8f35a0c48498776a7478d7b4a0418e1e2b8cb9cb9731f2922aaad7f8
- languageName: node
- linkType: hard
-
-"bl@npm:^4.0.3":
- version: 4.1.0
- resolution: "bl@npm:4.1.0"
- dependencies:
- buffer: ^5.5.0
- inherits: ^2.0.4
- readable-stream: ^3.4.0
- checksum: 9e8521fa7e83aa9427c6f8ccdcba6e8167ef30cc9a22df26effcc5ab682ef91d2cbc23a239f945d099289e4bbcfae7a192e9c28c84c6202e710a0dfec3722662
- languageName: node
- linkType: hard
-
-"brace-expansion@npm:^1.1.7":
- version: 1.1.11
- resolution: "brace-expansion@npm:1.1.11"
- dependencies:
- balanced-match: ^1.0.0
- concat-map: 0.0.1
- checksum: faf34a7bb0c3fcf4b59c7808bc5d2a96a40988addf2e7e09dfbb67a2251800e0d14cd2bfc1aa79174f2f5095c54ff27f46fb1289fe2d77dac755b5eb3434cc07
- languageName: node
- linkType: hard
-
-"brace-expansion@npm:^2.0.1":
- version: 2.0.1
- resolution: "brace-expansion@npm:2.0.1"
- dependencies:
- balanced-match: ^1.0.0
- checksum: a61e7cd2e8a8505e9f0036b3b6108ba5e926b4b55089eeb5550cd04a471fe216c96d4fe7e4c7f995c728c554ae20ddfc4244cad10aef255e72b62930afd233d1
- languageName: node
- linkType: hard
-
-"braces@npm:^3.0.2, braces@npm:~3.0.2":
- version: 3.0.3
- resolution: "braces@npm:3.0.3"
- dependencies:
- fill-range: ^7.1.1
- checksum: b95aa0b3bd909f6cd1720ffcf031aeaf46154dd88b4da01f9a1d3f7ea866a79eba76a6d01cbc3c422b2ee5cdc39a4f02491058d5df0d7bf6e6a162a832df1f69
- languageName: node
- linkType: hard
-
-"browserslist@npm:^4.22.2":
- version: 4.22.2
- resolution: "browserslist@npm:4.22.2"
- dependencies:
- caniuse-lite: ^1.0.30001565
- electron-to-chromium: ^1.4.601
- node-releases: ^2.0.14
- update-browserslist-db: ^1.0.13
- bin:
- browserslist: cli.js
- checksum: 33ddfcd9145220099a7a1ac533cecfe5b7548ffeb29b313e1b57be6459000a1f8fa67e781cf4abee97268ac594d44134fcc4a6b2b4750ceddc9796e3a22076d9
- languageName: node
- linkType: hard
-
-"bser@npm:2.1.1":
- version: 2.1.1
- resolution: "bser@npm:2.1.1"
- dependencies:
- node-int64: ^0.4.0
- checksum: 9ba4dc58ce86300c862bffc3ae91f00b2a03b01ee07f3564beeeaf82aa243b8b03ba53f123b0b842c190d4399b94697970c8e7cf7b1ea44b61aa28c3526a4449
- languageName: node
- linkType: hard
-
-"buffer-from@npm:^1.0.0":
- version: 1.1.2
- resolution: "buffer-from@npm:1.1.2"
- checksum: 0448524a562b37d4d7ed9efd91685a5b77a50672c556ea254ac9a6d30e3403a517d8981f10e565db24e8339413b43c97ca2951f10e399c6125a0d8911f5679bb
- languageName: node
- linkType: hard
-
-"buffer@npm:^5.5.0":
- version: 5.7.1
- resolution: "buffer@npm:5.7.1"
- dependencies:
- base64-js: ^1.3.1
- ieee754: ^1.1.13
- checksum: e2cf8429e1c4c7b8cbd30834ac09bd61da46ce35f5c22a78e6c2f04497d6d25541b16881e30a019c6fd3154150650ccee27a308eff3e26229d788bbdeb08ab84
- languageName: node
- linkType: hard
-
-"cacache@npm:^16.1.0":
- version: 16.1.3
- resolution: "cacache@npm:16.1.3"
- dependencies:
- "@npmcli/fs": ^2.1.0
- "@npmcli/move-file": ^2.0.0
- chownr: ^2.0.0
- fs-minipass: ^2.1.0
- glob: ^8.0.1
- infer-owner: ^1.0.4
- lru-cache: ^7.7.1
- minipass: ^3.1.6
- minipass-collect: ^1.0.2
- minipass-flush: ^1.0.5
- minipass-pipeline: ^1.2.4
- mkdirp: ^1.0.4
- p-map: ^4.0.0
- promise-inflight: ^1.0.1
- rimraf: ^3.0.2
- ssri: ^9.0.0
- tar: ^6.1.11
- unique-filename: ^2.0.0
- checksum: d91409e6e57d7d9a3a25e5dcc589c84e75b178ae8ea7de05cbf6b783f77a5fae938f6e8fda6f5257ed70000be27a681e1e44829251bfffe4c10216002f8f14e6
- languageName: node
- linkType: hard
-
-"cacache@npm:^18.0.0":
- version: 18.0.2
- resolution: "cacache@npm:18.0.2"
- dependencies:
- "@npmcli/fs": ^3.1.0
- fs-minipass: ^3.0.0
- glob: ^10.2.2
- lru-cache: ^10.0.1
- minipass: ^7.0.3
- minipass-collect: ^2.0.1
- minipass-flush: ^1.0.5
- minipass-pipeline: ^1.2.4
- p-map: ^4.0.0
- ssri: ^10.0.0
- tar: ^6.1.11
- unique-filename: ^3.0.0
- checksum: 0250df80e1ad0c828c956744850c5f742c24244e9deb5b7dc81bca90f8c10e011e132ecc58b64497cc1cad9a98968676147fb6575f4f94722f7619757b17a11b
- languageName: node
- linkType: hard
-
-"callsites@npm:^3.0.0":
- version: 3.1.0
- resolution: "callsites@npm:3.1.0"
- checksum: 072d17b6abb459c2ba96598918b55868af677154bec7e73d222ef95a8fdb9bbf7dae96a8421085cdad8cd190d86653b5b6dc55a4484f2e5b2e27d5e0c3fc15b3
- languageName: node
- linkType: hard
-
-"camelcase@npm:^5.3.1":
- version: 5.3.1
- resolution: "camelcase@npm:5.3.1"
- checksum: e6effce26b9404e3c0f301498184f243811c30dfe6d0b9051863bd8e4034d09c8c2923794f280d6827e5aa055f6c434115ff97864a16a963366fb35fd673024b
- languageName: node
- linkType: hard
-
-"camelcase@npm:^6.2.0":
- version: 6.3.0
- resolution: "camelcase@npm:6.3.0"
- checksum: 8c96818a9076434998511251dcb2761a94817ea17dbdc37f47ac080bd088fc62c7369429a19e2178b993497132c8cbcf5cc1f44ba963e76782ba469c0474938d
- languageName: node
- linkType: hard
-
-"caniuse-lite@npm:^1.0.30001565":
- version: 1.0.30001572
- resolution: "caniuse-lite@npm:1.0.30001572"
- checksum: 7d017a99a38e29ccee4ed3fc0ef1eb90cf082fcd3a7909c5c536c4ba1d55c5b26ecc1e4ad82c1caa6bfadce526764b354608710c9b61a75bdc7ce8ca15c5fcf2
- languageName: node
- linkType: hard
-
-"ccount@npm:^2.0.0":
- version: 2.0.1
- resolution: "ccount@npm:2.0.1"
- checksum: 48193dada54c9e260e0acf57fc16171a225305548f9ad20d5471e0f7a8c026aedd8747091dccb0d900cde7df4e4ddbd235df0d8de4a64c71b12f0d3303eeafd4
- languageName: node
- linkType: hard
-
-"chalk@npm:^2.4.2":
- version: 2.4.2
- resolution: "chalk@npm:2.4.2"
- dependencies:
- ansi-styles: ^3.2.1
- escape-string-regexp: ^1.0.5
- supports-color: ^5.3.0
- checksum: ec3661d38fe77f681200f878edbd9448821924e0f93a9cefc0e26a33b145f1027a2084bf19967160d11e1f03bfe4eaffcabf5493b89098b2782c3fe0b03d80c2
- languageName: node
- linkType: hard
-
-"chalk@npm:^4.0.0":
- version: 4.1.2
- resolution: "chalk@npm:4.1.2"
- dependencies:
- ansi-styles: ^4.1.0
- supports-color: ^7.1.0
- checksum: fe75c9d5c76a7a98d45495b91b2172fa3b7a09e0cc9370e5c8feb1c567b85c4288e2b3fded7cfdd7359ac28d6b3844feb8b82b8686842e93d23c827c417e83fc
- languageName: node
- linkType: hard
-
-"chalk@npm:^5.0.1":
- version: 5.3.0
- resolution: "chalk@npm:5.3.0"
- checksum: 623922e077b7d1e9dedaea6f8b9e9352921f8ae3afe739132e0e00c275971bdd331268183b2628cf4ab1727c45ea1f28d7e24ac23ce1db1eb653c414ca8a5a80
- languageName: node
- linkType: hard
-
-"char-regex@npm:^1.0.2":
- version: 1.0.2
- resolution: "char-regex@npm:1.0.2"
- checksum: b563e4b6039b15213114626621e7a3d12f31008bdce20f9c741d69987f62aeaace7ec30f6018890ad77b2e9b4d95324c9f5acfca58a9441e3b1dcdd1e2525d17
- languageName: node
- linkType: hard
-
-"character-entities-html4@npm:^2.0.0":
- version: 2.1.0
- resolution: "character-entities-html4@npm:2.1.0"
- checksum: 7034aa7c7fa90309667f6dd50499c8a760c3d3a6fb159adb4e0bada0107d194551cdbad0714302f62d06ce4ed68565c8c2e15fdef2e8f8764eb63fa92b34b11d
- languageName: node
- linkType: hard
-
-"character-entities-legacy@npm:^3.0.0":
- version: 3.0.0
- resolution: "character-entities-legacy@npm:3.0.0"
- checksum: 7582af055cb488b626d364b7d7a4e46b06abd526fb63c0e4eb35bcb9c9799cc4f76b39f34fdccef2d1174ac95e53e9ab355aae83227c1a2505877893fce77731
- languageName: node
- linkType: hard
-
-"character-entities@npm:^2.0.0":
- version: 2.0.2
- resolution: "character-entities@npm:2.0.2"
- checksum: cf1643814023697f725e47328fcec17923b8f1799102a8a79c1514e894815651794a2bffd84bb1b3a4b124b050154e4529ed6e81f7c8068a734aecf07a6d3def
- languageName: node
- linkType: hard
-
-"chokidar@npm:^3.5.3":
- version: 3.5.3
- resolution: "chokidar@npm:3.5.3"
- dependencies:
- anymatch: ~3.1.2
- braces: ~3.0.2
- fsevents: ~2.3.2
- glob-parent: ~5.1.2
- is-binary-path: ~2.1.0
- is-glob: ~4.0.1
- normalize-path: ~3.0.0
- readdirp: ~3.6.0
- dependenciesMeta:
- fsevents:
- optional: true
- checksum: b49fcde40176ba007ff361b198a2d35df60d9bb2a5aab228279eb810feae9294a6b4649ab15981304447afe1e6ffbf4788ad5db77235dc770ab777c6e771980c
- languageName: node
- linkType: hard
-
-"chownr@npm:^1.1.1":
- version: 1.1.4
- resolution: "chownr@npm:1.1.4"
- checksum: 115648f8eb38bac5e41c3857f3e663f9c39ed6480d1349977c4d96c95a47266fcacc5a5aabf3cb6c481e22d72f41992827db47301851766c4fd77ac21a4f081d
- languageName: node
- linkType: hard
-
-"chownr@npm:^2.0.0":
- version: 2.0.0
- resolution: "chownr@npm:2.0.0"
- checksum: c57cf9dd0791e2f18a5ee9c1a299ae6e801ff58fee96dc8bfd0dcb4738a6ce58dd252a3605b1c93c6418fe4f9d5093b28ffbf4d66648cb2a9c67eaef9679be2f
- languageName: node
- linkType: hard
-
-"ci-info@npm:^3.2.0":
- version: 3.9.0
- resolution: "ci-info@npm:3.9.0"
- checksum: 6b19dc9b2966d1f8c2041a838217299718f15d6c4b63ae36e4674edd2bee48f780e94761286a56aa59eb305a85fbea4ddffb7630ec063e7ec7e7e5ad42549a87
- languageName: node
- linkType: hard
-
-"cjs-module-lexer@npm:^1.0.0":
- version: 1.2.3
- resolution: "cjs-module-lexer@npm:1.2.3"
- checksum: 5ea3cb867a9bb609b6d476cd86590d105f3cfd6514db38ff71f63992ab40939c2feb68967faa15a6d2b1f90daa6416b79ea2de486e9e2485a6f8b66a21b4fb0a
- languageName: node
- linkType: hard
-
-"clean-stack@npm:^2.0.0":
- version: 2.2.0
- resolution: "clean-stack@npm:2.2.0"
- checksum: 2ac8cd2b2f5ec986a3c743935ec85b07bc174d5421a5efc8017e1f146a1cf5f781ae962618f416352103b32c9cd7e203276e8c28241bbe946160cab16149fb68
- languageName: node
- linkType: hard
-
-"cliui@npm:^8.0.1":
- version: 8.0.1
- resolution: "cliui@npm:8.0.1"
- dependencies:
- string-width: ^4.2.0
- strip-ansi: ^6.0.1
- wrap-ansi: ^7.0.0
- checksum: 79648b3b0045f2e285b76fb2e24e207c6db44323581e421c3acbd0e86454cba1b37aea976ab50195a49e7384b871e6dfb2247ad7dec53c02454ac6497394cb56
- languageName: node
- linkType: hard
-
-"co@npm:^4.6.0":
- version: 4.6.0
- resolution: "co@npm:4.6.0"
- checksum: 5210d9223010eb95b29df06a91116f2cf7c8e0748a9013ed853b53f362ea0e822f1e5bb054fb3cefc645239a4cf966af1f6133a3b43f40d591f3b68ed6cf0510
- languageName: node
- linkType: hard
-
-"collect-v8-coverage@npm:^1.0.0":
- version: 1.0.2
- resolution: "collect-v8-coverage@npm:1.0.2"
- checksum: c10f41c39ab84629d16f9f6137bc8a63d332244383fc368caf2d2052b5e04c20cd1fd70f66fcf4e2422b84c8226598b776d39d5f2d2a51867cc1ed5d1982b4da
- languageName: node
- linkType: hard
-
-"color-convert@npm:^1.9.0":
- version: 1.9.3
- resolution: "color-convert@npm:1.9.3"
- dependencies:
- color-name: 1.1.3
- checksum: fd7a64a17cde98fb923b1dd05c5f2e6f7aefda1b60d67e8d449f9328b4e53b228a428fd38bfeaeb2db2ff6b6503a776a996150b80cdf224062af08a5c8a3a203
- languageName: node
- linkType: hard
-
-"color-convert@npm:^2.0.1":
- version: 2.0.1
- resolution: "color-convert@npm:2.0.1"
- dependencies:
- color-name: ~1.1.4
- checksum: 79e6bdb9fd479a205c71d89574fccfb22bd9053bd98c6c4d870d65c132e5e904e6034978e55b43d69fcaa7433af2016ee203ce76eeba9cfa554b373e7f7db336
- languageName: node
- linkType: hard
-
-"color-name@npm:1.1.3":
- version: 1.1.3
- resolution: "color-name@npm:1.1.3"
- checksum: 09c5d3e33d2105850153b14466501f2bfb30324a2f76568a408763a3b7433b0e50e5b4ab1947868e65cb101bb7cb75029553f2c333b6d4b8138a73fcc133d69d
- languageName: node
- linkType: hard
-
-"color-name@npm:~1.1.4":
- version: 1.1.4
- resolution: "color-name@npm:1.1.4"
- checksum: b0445859521eb4021cd0fb0cc1a75cecf67fceecae89b63f62b201cca8d345baf8b952c966862a9d9a2632987d4f6581f0ec8d957dfacece86f0a7919316f610
- languageName: node
- linkType: hard
-
-"color-support@npm:^1.1.3":
- version: 1.1.3
- resolution: "color-support@npm:1.1.3"
- bin:
- color-support: bin.js
- checksum: 9b7356817670b9a13a26ca5af1c21615463b500783b739b7634a0c2047c16cef4b2865d7576875c31c3cddf9dd621fa19285e628f20198b233a5cfdda6d0793b
- languageName: node
- linkType: hard
-
-"comma-separated-tokens@npm:^2.0.0":
- version: 2.0.3
- resolution: "comma-separated-tokens@npm:2.0.3"
- checksum: e3bf9e0332a5c45f49b90e79bcdb4a7a85f28d6a6f0876a94f1bb9b2bfbdbbb9292aac50e1e742d8c0db1e62a0229a106f57917e2d067fca951d81737651700d
- languageName: node
- linkType: hard
-
-"concat-map@npm:0.0.1":
- version: 0.0.1
- resolution: "concat-map@npm:0.0.1"
- checksum: 902a9f5d8967a3e2faf138d5cb784b9979bad2e6db5357c5b21c568df4ebe62bcb15108af1b2253744844eb964fc023fbd9afbbbb6ddd0bcc204c6fb5b7bf3af
- languageName: node
- linkType: hard
-
-"console-control-strings@npm:^1.1.0":
- version: 1.1.0
- resolution: "console-control-strings@npm:1.1.0"
- checksum: 8755d76787f94e6cf79ce4666f0c5519906d7f5b02d4b884cf41e11dcd759ed69c57da0670afd9236d229a46e0f9cf519db0cd829c6dca820bb5a5c3def584ed
- languageName: node
- linkType: hard
-
-"convert-source-map@npm:^2.0.0":
- version: 2.0.0
- resolution: "convert-source-map@npm:2.0.0"
- checksum: 63ae9933be5a2b8d4509daca5124e20c14d023c820258e484e32dc324d34c2754e71297c94a05784064ad27615037ef677e3f0c00469fb55f409d2bb21261035
- languageName: node
- linkType: hard
-
-"create-jest@npm:^29.7.0":
- version: 29.7.0
- resolution: "create-jest@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- chalk: ^4.0.0
- exit: ^0.1.2
- graceful-fs: ^4.2.9
- jest-config: ^29.7.0
- jest-util: ^29.7.0
- prompts: ^2.0.1
- bin:
- create-jest: bin/create-jest.js
- checksum: 1427d49458adcd88547ef6fa39041e1fe9033a661293aa8d2c3aa1b4967cb5bf4f0c00436c7a61816558f28ba2ba81a94d5c962e8022ea9a883978fc8e1f2945
- languageName: node
- linkType: hard
-
-"cross-spawn@npm:^7.0.0, cross-spawn@npm:^7.0.3":
- version: 7.0.3
- resolution: "cross-spawn@npm:7.0.3"
- dependencies:
- path-key: ^3.1.0
- shebang-command: ^2.0.0
- which: ^2.0.1
- checksum: 671cc7c7288c3a8406f3c69a3ae2fc85555c04169e9d611def9a675635472614f1c0ed0ef80955d5b6d4e724f6ced67f0ad1bb006c2ea643488fcfef994d7f52
- languageName: node
- linkType: hard
-
-"de-indent@npm:^1.0.2":
- version: 1.0.2
- resolution: "de-indent@npm:1.0.2"
- checksum: 8deacc0f4a397a4414a0fc4d0034d2b7782e7cb4eaf34943ea47754e08eccf309a0e71fa6f56cc48de429ede999a42d6b4bca761bf91683be0095422dbf24611
- languageName: node
- linkType: hard
-
-"debug@npm:4, debug@npm:^4.0.0, debug@npm:^4.1.0, debug@npm:^4.1.1, debug@npm:^4.3.1, debug@npm:^4.3.3, debug@npm:^4.3.4":
- version: 4.3.4
- resolution: "debug@npm:4.3.4"
- dependencies:
- ms: 2.1.2
- peerDependenciesMeta:
- supports-color:
- optional: true
- checksum: 3dbad3f94ea64f34431a9cbf0bafb61853eda57bff2880036153438f50fb5a84f27683ba0d8e5426bf41a8c6ff03879488120cf5b3a761e77953169c0600a708
- languageName: node
- linkType: hard
-
-"decode-named-character-reference@npm:^1.0.0":
- version: 1.0.2
- resolution: "decode-named-character-reference@npm:1.0.2"
- dependencies:
- character-entities: ^2.0.0
- checksum: f4c71d3b93105f20076052f9cb1523a22a9c796b8296cd35eef1ca54239c78d182c136a848b83ff8da2071e3ae2b1d300bf29d00650a6d6e675438cc31b11d78
- languageName: node
- linkType: hard
-
-"dedent@npm:^1.0.0":
- version: 1.5.1
- resolution: "dedent@npm:1.5.1"
- peerDependencies:
- babel-plugin-macros: ^3.1.0
- peerDependenciesMeta:
- babel-plugin-macros:
- optional: true
- checksum: c3c300a14edf1bdf5a873f9e4b22e839d62490bc5c8d6169c1f15858a1a76733d06a9a56930e963d677a2ceeca4b6b0894cc5ea2f501aa382ca5b92af3413c2a
- languageName: node
- linkType: hard
-
-"deepmerge@npm:^4.2.2":
- version: 4.3.1
- resolution: "deepmerge@npm:4.3.1"
- checksum: 2024c6a980a1b7128084170c4cf56b0fd58a63f2da1660dcfe977415f27b17dbe5888668b59d0b063753f3220719d5e400b7f113609489c90160bb9a5518d052
- languageName: node
- linkType: hard
-
-"delegates@npm:^1.0.0":
- version: 1.0.0
- resolution: "delegates@npm:1.0.0"
- checksum: a51744d9b53c164ba9c0492471a1a2ffa0b6727451bdc89e31627fdf4adda9d51277cfcbfb20f0a6f08ccb3c436f341df3e92631a3440226d93a8971724771fd
- languageName: node
- linkType: hard
-
-"dequal@npm:^2.0.0":
- version: 2.0.3
- resolution: "dequal@npm:2.0.3"
- checksum: 8679b850e1a3d0ebbc46ee780d5df7b478c23f335887464023a631d1b9af051ad4a6595a44220f9ff8ff95a8ddccf019b5ad778a976fd7bbf77383d36f412f90
- languageName: node
- linkType: hard
-
-"detect-newline@npm:^3.0.0":
- version: 3.1.0
- resolution: "detect-newline@npm:3.1.0"
- checksum: ae6cd429c41ad01b164c59ea36f264a2c479598e61cba7c99da24175a7ab80ddf066420f2bec9a1c57a6bead411b4655ff15ad7d281c000a89791f48cbe939e7
- languageName: node
- linkType: hard
-
-"diff-sequences@npm:^29.6.3":
- version: 29.6.3
- resolution: "diff-sequences@npm:29.6.3"
- checksum: f4914158e1f2276343d98ff5b31fc004e7304f5470bf0f1adb2ac6955d85a531a6458d33e87667f98f6ae52ebd3891bb47d420bb48a5bd8b7a27ee25b20e33aa
- languageName: node
- linkType: hard
-
-"diff@npm:^5.0.0, diff@npm:^5.1.0":
- version: 5.1.0
- resolution: "diff@npm:5.1.0"
- checksum: c7bf0df7c9bfbe1cf8a678fd1b2137c4fb11be117a67bc18a0e03ae75105e8533dbfb1cda6b46beb3586ef5aed22143ef9d70713977d5fb1f9114e21455fba90
- languageName: node
- linkType: hard
-
-"doctrine-temporary-fork@npm:2.1.0":
- version: 2.1.0
- resolution: "doctrine-temporary-fork@npm:2.1.0"
- dependencies:
- esutils: ^2.0.2
- checksum: fa625c9d55bc4affd944757eff0268945bb2bda5eed198163cd66da6b80fff93b9384ce80261d3640985880e4d22a1df9fdd906fcf06cf56d79dce570bdb578a
- languageName: node
- linkType: hard
-
-"documentation@npm:^14.0.2":
- version: 14.0.2
- resolution: "documentation@npm:14.0.2"
- dependencies:
- "@babel/core": ^7.18.10
- "@babel/generator": ^7.18.10
- "@babel/parser": ^7.18.11
- "@babel/traverse": ^7.18.11
- "@babel/types": ^7.18.10
- "@vue/compiler-sfc": ^3.2.37
- chalk: ^5.0.1
- chokidar: ^3.5.3
- diff: ^5.1.0
- doctrine-temporary-fork: 2.1.0
- git-url-parse: ^13.1.0
- github-slugger: 1.4.0
- glob: ^8.0.3
- globals-docs: ^2.4.1
- highlight.js: ^11.6.0
- ini: ^3.0.0
- js-yaml: ^4.1.0
- konan: ^2.1.1
- lodash: ^4.17.21
- mdast-util-find-and-replace: ^2.2.1
- mdast-util-inject: ^1.1.0
- micromark-util-character: ^1.1.0
- parse-filepath: ^1.0.2
- pify: ^6.0.0
- read-pkg-up: ^9.1.0
- remark: ^14.0.2
- remark-gfm: ^3.0.1
- remark-html: ^15.0.1
- remark-reference-links: ^6.0.1
- remark-toc: ^8.0.1
- resolve: ^1.22.1
- strip-json-comments: ^5.0.0
- unist-builder: ^3.0.0
- unist-util-visit: ^4.1.0
- vfile: ^5.3.4
- vfile-reporter: ^7.0.4
- vfile-sort: ^3.0.0
- vue-template-compiler: ^2.7.8
- yargs: ^17.5.1
- dependenciesMeta:
- "@vue/compiler-sfc":
- optional: true
- vue-template-compiler:
- optional: true
- bin:
- documentation: bin/documentation.js
- checksum: fa6734ce55d3bc6397c900e8093044ba1a411949478dc46342e99f765422724e181d47d4cbcbf979aeb46add99174b995da985d705c8e432d9f9acd0e97116d5
- languageName: node
- linkType: hard
-
-"eastasianwidth@npm:^0.2.0":
- version: 0.2.0
- resolution: "eastasianwidth@npm:0.2.0"
- checksum: 7d00d7cd8e49b9afa762a813faac332dee781932d6f2c848dc348939c4253f1d4564341b7af1d041853bc3f32c2ef141b58e0a4d9862c17a7f08f68df1e0f1ed
- languageName: node
- linkType: hard
-
-"electron-to-chromium@npm:^1.4.601":
- version: 1.4.616
- resolution: "electron-to-chromium@npm:1.4.616"
- checksum: 9fd53bd4e5cded61ee51164a0d23ced1d7677ab176ef8e28eb4a27ceaae1deb3bb0038024db48478507204bfcd48ef66866c078721915a9c7b019697cc5680bf
- languageName: node
- linkType: hard
-
-"emittery@npm:^0.13.1":
- version: 0.13.1
- resolution: "emittery@npm:0.13.1"
- checksum: 2b089ab6306f38feaabf4f6f02792f9ec85fc054fda79f44f6790e61bbf6bc4e1616afb9b232e0c5ec5289a8a452f79bfa6d905a6fd64e94b49981f0934001c6
- languageName: node
- linkType: hard
-
-"emoji-regex@npm:^8.0.0":
- version: 8.0.0
- resolution: "emoji-regex@npm:8.0.0"
- checksum: d4c5c39d5a9868b5fa152f00cada8a936868fd3367f33f71be515ecee4c803132d11b31a6222b2571b1e5f7e13890156a94880345594d0ce7e3c9895f560f192
- languageName: node
- linkType: hard
-
-"emoji-regex@npm:^9.2.2":
- version: 9.2.2
- resolution: "emoji-regex@npm:9.2.2"
- checksum: 8487182da74aabd810ac6d6f1994111dfc0e331b01271ae01ec1eb0ad7b5ecc2bbbbd2f053c05cb55a1ac30449527d819bbfbf0e3de1023db308cbcb47f86601
- languageName: node
- linkType: hard
-
-"encoding@npm:^0.1.13":
- version: 0.1.13
- resolution: "encoding@npm:0.1.13"
- dependencies:
- iconv-lite: ^0.6.2
- checksum: bb98632f8ffa823996e508ce6a58ffcf5856330fde839ae42c9e1f436cc3b5cc651d4aeae72222916545428e54fd0f6aa8862fd8d25bdbcc4589f1e3f3715e7f
- languageName: node
- linkType: hard
-
-"end-of-stream@npm:^1.1.0, end-of-stream@npm:^1.4.1":
- version: 1.4.4
- resolution: "end-of-stream@npm:1.4.4"
- dependencies:
- once: ^1.4.0
- checksum: 530a5a5a1e517e962854a31693dbb5c0b2fc40b46dad2a56a2deec656ca040631124f4795823acc68238147805f8b021abbe221f4afed5ef3c8e8efc2024908b
- languageName: node
- linkType: hard
-
-"env-paths@npm:^2.2.0":
- version: 2.2.1
- resolution: "env-paths@npm:2.2.1"
- checksum: 65b5df55a8bab92229ab2b40dad3b387fad24613263d103a97f91c9fe43ceb21965cd3392b1ccb5d77088021e525c4e0481adb309625d0cb94ade1d1fb8dc17e
- languageName: node
- linkType: hard
-
-"err-code@npm:^2.0.2":
- version: 2.0.3
- resolution: "err-code@npm:2.0.3"
- checksum: 8b7b1be20d2de12d2255c0bc2ca638b7af5171142693299416e6a9339bd7d88fc8d7707d913d78e0993176005405a236b066b45666b27b797252c771156ace54
- languageName: node
- linkType: hard
-
-"error-ex@npm:^1.3.1":
- version: 1.3.2
- resolution: "error-ex@npm:1.3.2"
- dependencies:
- is-arrayish: ^0.2.1
- checksum: c1c2b8b65f9c91b0f9d75f0debaa7ec5b35c266c2cac5de412c1a6de86d4cbae04ae44e510378cb14d032d0645a36925d0186f8bb7367bcc629db256b743a001
- languageName: node
- linkType: hard
-
-"escalade@npm:^3.1.1":
- version: 3.1.1
- resolution: "escalade@npm:3.1.1"
- checksum: a3e2a99f07acb74b3ad4989c48ca0c3140f69f923e56d0cba0526240ee470b91010f9d39001f2a4a313841d237ede70a729e92125191ba5d21e74b106800b133
- languageName: node
- linkType: hard
-
-"escape-string-regexp@npm:^1.0.5":
- version: 1.0.5
- resolution: "escape-string-regexp@npm:1.0.5"
- checksum: 6092fda75c63b110c706b6a9bfde8a612ad595b628f0bd2147eea1d3406723020810e591effc7db1da91d80a71a737a313567c5abb3813e8d9c71f4aa595b410
- languageName: node
- linkType: hard
-
-"escape-string-regexp@npm:^2.0.0":
- version: 2.0.0
- resolution: "escape-string-regexp@npm:2.0.0"
- checksum: 9f8a2d5743677c16e85c810e3024d54f0c8dea6424fad3c79ef6666e81dd0846f7437f5e729dfcdac8981bc9e5294c39b4580814d114076b8d36318f46ae4395
- languageName: node
- linkType: hard
-
-"escape-string-regexp@npm:^5.0.0":
- version: 5.0.0
- resolution: "escape-string-regexp@npm:5.0.0"
- checksum: 20daabe197f3cb198ec28546deebcf24b3dbb1a5a269184381b3116d12f0532e06007f4bc8da25669d6a7f8efb68db0758df4cd981f57bc5b57f521a3e12c59e
- languageName: node
- linkType: hard
-
-"esprima@npm:^4.0.0":
- version: 4.0.1
- resolution: "esprima@npm:4.0.1"
- bin:
- esparse: ./bin/esparse.js
- esvalidate: ./bin/esvalidate.js
- checksum: b45bc805a613dbea2835278c306b91aff6173c8d034223fa81498c77dcbce3b2931bf6006db816f62eacd9fd4ea975dfd85a5b7f3c6402cfd050d4ca3c13a628
- languageName: node
- linkType: hard
-
-"estree-walker@npm:^2.0.2":
- version: 2.0.2
- resolution: "estree-walker@npm:2.0.2"
- checksum: 6151e6f9828abe2259e57f5fd3761335bb0d2ebd76dc1a01048ccee22fabcfef3c0859300f6d83ff0d1927849368775ec5a6d265dde2f6de5a1be1721cd94efc
- languageName: node
- linkType: hard
-
-"esutils@npm:^2.0.2":
- version: 2.0.3
- resolution: "esutils@npm:2.0.3"
- checksum: 22b5b08f74737379a840b8ed2036a5fb35826c709ab000683b092d9054e5c2a82c27818f12604bfc2a9a76b90b6834ef081edbc1c7ae30d1627012e067c6ec87
- languageName: node
- linkType: hard
-
-"execa@npm:^5.0.0":
- version: 5.1.1
- resolution: "execa@npm:5.1.1"
- dependencies:
- cross-spawn: ^7.0.3
- get-stream: ^6.0.0
- human-signals: ^2.1.0
- is-stream: ^2.0.0
- merge-stream: ^2.0.0
- npm-run-path: ^4.0.1
- onetime: ^5.1.2
- signal-exit: ^3.0.3
- strip-final-newline: ^2.0.0
- checksum: fba9022c8c8c15ed862847e94c252b3d946036d7547af310e344a527e59021fd8b6bb0723883ea87044dc4f0201f949046993124a42ccb0855cae5bf8c786343
- languageName: node
- linkType: hard
-
-"execspawn@npm:^1.0.1":
- version: 1.0.1
- resolution: "execspawn@npm:1.0.1"
- dependencies:
- util-extend: ^1.0.1
- checksum: fc2be7fb6de7b4c4cd779ca3f6cf4bf19f0fd22e7967194dcec3c379ac7d914587652c933bac774f0b6bba8f15069969921065553f1e19eb58e25ab675f68689
- languageName: node
- linkType: hard
-
-"exit@npm:^0.1.2":
- version: 0.1.2
- resolution: "exit@npm:0.1.2"
- checksum: abc407f07a875c3961e4781dfcb743b58d6c93de9ab263f4f8c9d23bb6da5f9b7764fc773f86b43dd88030444d5ab8abcb611cb680fba8ca075362b77114bba3
- languageName: node
- linkType: hard
-
-"expect@npm:^29.7.0":
- version: 29.7.0
- resolution: "expect@npm:29.7.0"
- dependencies:
- "@jest/expect-utils": ^29.7.0
- jest-get-type: ^29.6.3
- jest-matcher-utils: ^29.7.0
- jest-message-util: ^29.7.0
- jest-util: ^29.7.0
- checksum: 9257f10288e149b81254a0fda8ffe8d54a7061cd61d7515779998b012579d2b8c22354b0eb901daf0145f347403da582f75f359f4810c007182ad3fb318b5c0c
- languageName: node
- linkType: hard
-
-"exponential-backoff@npm:^3.1.1":
- version: 3.1.1
- resolution: "exponential-backoff@npm:3.1.1"
- checksum: 3d21519a4f8207c99f7457287291316306255a328770d320b401114ec8481986e4e467e854cb9914dd965e0a1ca810a23ccb559c642c88f4c7f55c55778a9b48
- languageName: node
- linkType: hard
-
-"extend@npm:^3.0.0":
- version: 3.0.2
- resolution: "extend@npm:3.0.2"
- checksum: a50a8309ca65ea5d426382ff09f33586527882cf532931cb08ca786ea3146c0553310bda688710ff61d7668eba9f96b923fe1420cdf56a2c3eaf30fcab87b515
- languageName: node
- linkType: hard
-
-"fast-json-stable-stringify@npm:^2.1.0":
- version: 2.1.0
- resolution: "fast-json-stable-stringify@npm:2.1.0"
- checksum: b191531e36c607977e5b1c47811158733c34ccb3bfde92c44798929e9b4154884378536d26ad90dfecd32e1ffc09c545d23535ad91b3161a27ddbb8ebe0cbecb
- languageName: node
- linkType: hard
-
-"fb-watchman@npm:^2.0.0":
- version: 2.0.2
- resolution: "fb-watchman@npm:2.0.2"
- dependencies:
- bser: 2.1.1
- checksum: b15a124cef28916fe07b400eb87cbc73ca082c142abf7ca8e8de6af43eca79ca7bd13eb4d4d48240b3bd3136eaac40d16e42d6edf87a8e5d1dd8070626860c78
- languageName: node
- linkType: hard
-
-"fill-range@npm:^7.1.1":
- version: 7.1.1
- resolution: "fill-range@npm:7.1.1"
- dependencies:
- to-regex-range: ^5.0.1
- checksum: b4abfbca3839a3d55e4ae5ec62e131e2e356bf4859ce8480c64c4876100f4df292a63e5bb1618e1d7460282ca2b305653064f01654474aa35c68000980f17798
- languageName: node
- linkType: hard
-
-"find-up@npm:^4.0.0, find-up@npm:^4.1.0":
- version: 4.1.0
- resolution: "find-up@npm:4.1.0"
- dependencies:
- locate-path: ^5.0.0
- path-exists: ^4.0.0
- checksum: 4c172680e8f8c1f78839486e14a43ef82e9decd0e74145f40707cc42e7420506d5ec92d9a11c22bd2c48fb0c384ea05dd30e10dd152fefeec6f2f75282a8b844
- languageName: node
- linkType: hard
-
-"find-up@npm:^6.3.0":
- version: 6.3.0
- resolution: "find-up@npm:6.3.0"
- dependencies:
- locate-path: ^7.1.0
- path-exists: ^5.0.0
- checksum: 9a21b7f9244a420e54c6df95b4f6fc3941efd3c3e5476f8274eb452f6a85706e7a6a90de71353ee4f091fcb4593271a6f92810a324ec542650398f928783c280
- languageName: node
- linkType: hard
-
-"foreground-child@npm:^3.1.0":
- version: 3.1.1
- resolution: "foreground-child@npm:3.1.1"
- dependencies:
- cross-spawn: ^7.0.0
- signal-exit: ^4.0.1
- checksum: 139d270bc82dc9e6f8bc045fe2aae4001dc2472157044fdfad376d0a3457f77857fa883c1c8b21b491c6caade9a926a4bed3d3d2e8d3c9202b151a4cbbd0bcd5
- languageName: node
- linkType: hard
-
-"fs-constants@npm:^1.0.0":
- version: 1.0.0
- resolution: "fs-constants@npm:1.0.0"
- checksum: 18f5b718371816155849475ac36c7d0b24d39a11d91348cfcb308b4494824413e03572c403c86d3a260e049465518c4f0d5bd00f0371cdfcad6d4f30a85b350d
- languageName: node
- linkType: hard
-
-"fs-minipass@npm:^2.0.0, fs-minipass@npm:^2.1.0":
- version: 2.1.0
- resolution: "fs-minipass@npm:2.1.0"
- dependencies:
- minipass: ^3.0.0
- checksum: 1b8d128dae2ac6cc94230cc5ead341ba3e0efaef82dab46a33d171c044caaa6ca001364178d42069b2809c35a1c3c35079a32107c770e9ffab3901b59af8c8b1
- languageName: node
- linkType: hard
-
-"fs-minipass@npm:^3.0.0":
- version: 3.0.3
- resolution: "fs-minipass@npm:3.0.3"
- dependencies:
- minipass: ^7.0.3
- checksum: 8722a41109130851d979222d3ec88aabaceeaaf8f57b2a8f744ef8bd2d1ce95453b04a61daa0078822bc5cd21e008814f06fe6586f56fef511e71b8d2394d802
- languageName: node
- linkType: hard
-
-"fs.realpath@npm:^1.0.0":
- version: 1.0.0
- resolution: "fs.realpath@npm:1.0.0"
- checksum: 99ddea01a7e75aa276c250a04eedeffe5662bce66c65c07164ad6264f9de18fb21be9433ead460e54cff20e31721c811f4fb5d70591799df5f85dce6d6746fd0
- languageName: node
- linkType: hard
-
-"fsevents@npm:^2.3.2, fsevents@npm:~2.3.2":
- version: 2.3.3
- resolution: "fsevents@npm:2.3.3"
- dependencies:
- node-gyp: latest
- checksum: 11e6ea6fea15e42461fc55b4b0e4a0a3c654faa567f1877dbd353f39156f69def97a69936d1746619d656c4b93de2238bf731f6085a03a50cabf287c9d024317
- conditions: os=darwin
- languageName: node
- linkType: hard
-
-"fsevents@patch:fsevents@^2.3.2#~builtin, fsevents@patch:fsevents@~2.3.2#~builtin":
- version: 2.3.3
- resolution: "fsevents@patch:fsevents@npm%3A2.3.3#~builtin::version=2.3.3&hash=df0bf1"
- dependencies:
- node-gyp: latest
- conditions: os=darwin
- languageName: node
- linkType: hard
-
-"function-bind@npm:^1.1.2":
- version: 1.1.2
- resolution: "function-bind@npm:1.1.2"
- checksum: 2b0ff4ce708d99715ad14a6d1f894e2a83242e4a52ccfcefaee5e40050562e5f6dafc1adbb4ce2d4ab47279a45dc736ab91ea5042d843c3c092820dfe032efb1
- languageName: node
- linkType: hard
-
-"gauge@npm:^4.0.3":
- version: 4.0.4
- resolution: "gauge@npm:4.0.4"
- dependencies:
- aproba: ^1.0.3 || ^2.0.0
- color-support: ^1.1.3
- console-control-strings: ^1.1.0
- has-unicode: ^2.0.1
- signal-exit: ^3.0.7
- string-width: ^4.2.3
- strip-ansi: ^6.0.1
- wide-align: ^1.1.5
- checksum: 788b6bfe52f1dd8e263cda800c26ac0ca2ff6de0b6eee2fe0d9e3abf15e149b651bd27bf5226be10e6e3edb5c4e5d5985a5a1a98137e7a892f75eff76467ad2d
- languageName: node
- linkType: hard
-
-"gensync@npm:^1.0.0-beta.2":
- version: 1.0.0-beta.2
- resolution: "gensync@npm:1.0.0-beta.2"
- checksum: a7437e58c6be12aa6c90f7730eac7fa9833dc78872b4ad2963d2031b00a3367a93f98aec75f9aaac7220848e4026d67a8655e870b24f20a543d103c0d65952ec
- languageName: node
- linkType: hard
-
-"get-caller-file@npm:^2.0.5":
- version: 2.0.5
- resolution: "get-caller-file@npm:2.0.5"
- checksum: b9769a836d2a98c3ee734a88ba712e62703f1df31b94b784762c433c27a386dd6029ff55c2a920c392e33657d80191edbf18c61487e198844844516f843496b9
- languageName: node
- linkType: hard
-
-"get-package-type@npm:^0.1.0":
- version: 0.1.0
- resolution: "get-package-type@npm:0.1.0"
- checksum: bba0811116d11e56d702682ddef7c73ba3481f114590e705fc549f4d868972263896af313c57a25c076e3c0d567e11d919a64ba1b30c879be985fc9d44f96148
- languageName: node
- linkType: hard
-
-"get-stream@npm:^6.0.0":
- version: 6.0.1
- resolution: "get-stream@npm:6.0.1"
- checksum: e04ecece32c92eebf5b8c940f51468cd53554dcbb0ea725b2748be583c9523d00128137966afce410b9b051eb2ef16d657cd2b120ca8edafcf5a65e81af63cad
- languageName: node
- linkType: hard
-
-"git-up@npm:^7.0.0":
- version: 7.0.0
- resolution: "git-up@npm:7.0.0"
- dependencies:
- is-ssh: ^1.4.0
- parse-url: ^8.1.0
- checksum: 2faadbab51e94d2ffb220e426e950087cc02c15d664e673bd5d1f734cfa8196fed8b19493f7bf28fe216d087d10e22a7fd9b63687e0ba7d24f0ddcfb0a266d6e
- languageName: node
- linkType: hard
-
-"git-url-parse@npm:^13.1.0":
- version: 13.1.1
- resolution: "git-url-parse@npm:13.1.1"
- dependencies:
- git-up: ^7.0.0
- checksum: 8a6111814f4dfff304149b22c8766dc0a90c10e4ea5b5d103f7c3f14b0a711c7b20fc5a9e03c0e2d29123486ac648f9e19f663d8132f69549bee2de49ee96989
- languageName: node
- linkType: hard
-
-"github-slugger@npm:1.4.0":
- version: 1.4.0
- resolution: "github-slugger@npm:1.4.0"
- checksum: 4f52e7a21f5c6a4c5328f01fe4fe13ae8881fea78bfe31f9e72c4038f97e3e70d52fb85aa7633a52c501dc2486874474d9abd22aa61cbe9b113099a495551c6b
- languageName: node
- linkType: hard
-
-"github-slugger@npm:^2.0.0":
- version: 2.0.0
- resolution: "github-slugger@npm:2.0.0"
- checksum: 250375cde2058f21454872c2c79f72c4637340c30c51ff158ca4ec71cbc478f33d54477d787a662f9207aeb095a2060f155bc01f15329ba8a5fb6698e0fc81f8
- languageName: node
- linkType: hard
-
-"glob-parent@npm:~5.1.2":
- version: 5.1.2
- resolution: "glob-parent@npm:5.1.2"
- dependencies:
- is-glob: ^4.0.1
- checksum: f4f2bfe2425296e8a47e36864e4f42be38a996db40420fe434565e4480e3322f18eb37589617a98640c5dc8fdec1a387007ee18dbb1f3f5553409c34d17f425e
- languageName: node
- linkType: hard
-
-"glob@npm:^10.2.2, glob@npm:^10.3.10":
- version: 10.3.10
- resolution: "glob@npm:10.3.10"
- dependencies:
- foreground-child: ^3.1.0
- jackspeak: ^2.3.5
- minimatch: ^9.0.1
- minipass: ^5.0.0 || ^6.0.2 || ^7.0.0
- path-scurry: ^1.10.1
- bin:
- glob: dist/esm/bin.mjs
- checksum: 4f2fe2511e157b5a3f525a54092169a5f92405f24d2aed3142f4411df328baca13059f4182f1db1bf933e2c69c0bd89e57ae87edd8950cba8c7ccbe84f721cf3
- languageName: node
- linkType: hard
-
-"glob@npm:^7.1.3, glob@npm:^7.1.4":
- version: 7.2.3
- resolution: "glob@npm:7.2.3"
- dependencies:
- fs.realpath: ^1.0.0
- inflight: ^1.0.4
- inherits: 2
- minimatch: ^3.1.1
- once: ^1.3.0
- path-is-absolute: ^1.0.0
- checksum: 29452e97b38fa704dabb1d1045350fb2467cf0277e155aa9ff7077e90ad81d1ea9d53d3ee63bd37c05b09a065e90f16aec4a65f5b8de401d1dac40bc5605d133
- languageName: node
- linkType: hard
-
-"glob@npm:^8.0.1, glob@npm:^8.0.3":
- version: 8.1.0
- resolution: "glob@npm:8.1.0"
- dependencies:
- fs.realpath: ^1.0.0
- inflight: ^1.0.4
- inherits: 2
- minimatch: ^5.0.1
- once: ^1.3.0
- checksum: 92fbea3221a7d12075f26f0227abac435de868dd0736a17170663783296d0dd8d3d532a5672b4488a439bf5d7fb85cdd07c11185d6cd39184f0385cbdfb86a47
- languageName: node
- linkType: hard
-
-"globals-docs@npm:^2.4.1":
- version: 2.4.1
- resolution: "globals-docs@npm:2.4.1"
- checksum: 78980199c990b089ebb28a15c9a463a6f9034127b256140c44879dffdbd2dc0bd26fb51979a94373e81e5bee79d090e3e549749d68fe6d940c54c4891944a827
- languageName: node
- linkType: hard
-
-"globals@npm:^11.1.0":
- version: 11.12.0
- resolution: "globals@npm:11.12.0"
- checksum: 67051a45eca3db904aee189dfc7cd53c20c7d881679c93f6146ddd4c9f4ab2268e68a919df740d39c71f4445d2b38ee360fc234428baea1dbdfe68bbcb46979e
- languageName: node
- linkType: hard
-
-"gpt4all@workspace:.":
- version: 0.0.0-use.local
- resolution: "gpt4all@workspace:."
- dependencies:
- "@types/node": ^20.1.5
- documentation: ^14.0.2
- jest: ^29.5.0
- md5-file: ^5.0.0
- node-addon-api: ^6.1.0
- node-gyp: 9.x.x
- node-gyp-build: ^4.6.0
- prebuildify: ^5.0.1
- prettier: ^2.8.8
- dependenciesMeta:
- node-gyp:
- optional: true
- languageName: unknown
- linkType: soft
-
-"graceful-fs@npm:^4.2.6, graceful-fs@npm:^4.2.9":
- version: 4.2.11
- resolution: "graceful-fs@npm:4.2.11"
- checksum: ac85f94da92d8eb6b7f5a8b20ce65e43d66761c55ce85ac96df6865308390da45a8d3f0296dd3a663de65d30ba497bd46c696cc1e248c72b13d6d567138a4fc7
- languageName: node
- linkType: hard
-
-"has-flag@npm:^3.0.0":
- version: 3.0.0
- resolution: "has-flag@npm:3.0.0"
- checksum: 4a15638b454bf086c8148979aae044dd6e39d63904cd452d970374fa6a87623423da485dfb814e7be882e05c096a7ccf1ebd48e7e7501d0208d8384ff4dea73b
- languageName: node
- linkType: hard
-
-"has-flag@npm:^4.0.0":
- version: 4.0.0
- resolution: "has-flag@npm:4.0.0"
- checksum: 261a1357037ead75e338156b1f9452c016a37dcd3283a972a30d9e4a87441ba372c8b81f818cd0fbcd9c0354b4ae7e18b9e1afa1971164aef6d18c2b6095a8ad
- languageName: node
- linkType: hard
-
-"has-unicode@npm:^2.0.1":
- version: 2.0.1
- resolution: "has-unicode@npm:2.0.1"
- checksum: 1eab07a7436512db0be40a710b29b5dc21fa04880b7f63c9980b706683127e3c1b57cb80ea96d47991bdae2dfe479604f6a1ba410106ee1046a41d1bd0814400
- languageName: node
- linkType: hard
-
-"hasown@npm:^2.0.0":
- version: 2.0.0
- resolution: "hasown@npm:2.0.0"
- dependencies:
- function-bind: ^1.1.2
- checksum: 6151c75ca12554565098641c98a40f4cc86b85b0fd5b6fe92360967e4605a4f9610f7757260b4e8098dd1c2ce7f4b095f2006fe72a570e3b6d2d28de0298c176
- languageName: node
- linkType: hard
-
-"hast-util-from-parse5@npm:^7.0.0":
- version: 7.1.2
- resolution: "hast-util-from-parse5@npm:7.1.2"
- dependencies:
- "@types/hast": ^2.0.0
- "@types/unist": ^2.0.0
- hastscript: ^7.0.0
- property-information: ^6.0.0
- vfile: ^5.0.0
- vfile-location: ^4.0.0
- web-namespaces: ^2.0.0
- checksum: 7b4ed5b508b1352127c6719f7b0c0880190cf9859fe54ccaf7c9228ecf623d36cef3097910b3874d2fe1aac6bf4cf45d3cc2303daac3135a05e9ade6534ddddb
- languageName: node
- linkType: hard
-
-"hast-util-parse-selector@npm:^3.0.0":
- version: 3.1.1
- resolution: "hast-util-parse-selector@npm:3.1.1"
- dependencies:
- "@types/hast": ^2.0.0
- checksum: 511d373465f60dd65e924f88bf0954085f4fb6e3a2b062a4b5ac43b93cbfd36a8dce6234b5d1e3e63499d936375687e83fc5da55628b22bd6b581b5ee167d1c4
- languageName: node
- linkType: hard
-
-"hast-util-raw@npm:^7.0.0":
- version: 7.2.3
- resolution: "hast-util-raw@npm:7.2.3"
- dependencies:
- "@types/hast": ^2.0.0
- "@types/parse5": ^6.0.0
- hast-util-from-parse5: ^7.0.0
- hast-util-to-parse5: ^7.0.0
- html-void-elements: ^2.0.0
- parse5: ^6.0.0
- unist-util-position: ^4.0.0
- unist-util-visit: ^4.0.0
- vfile: ^5.0.0
- web-namespaces: ^2.0.0
- zwitch: ^2.0.0
- checksum: 21857eea3ffb8fd92d2d9be7793b56d0b2c40db03c4cfa14828855ae41d7c584917aa83efb7157220b2e41e25e95f81f24679ac342c35145e5f1c1d39015f81f
- languageName: node
- linkType: hard
-
-"hast-util-sanitize@npm:^4.0.0":
- version: 4.1.0
- resolution: "hast-util-sanitize@npm:4.1.0"
- dependencies:
- "@types/hast": ^2.0.0
- checksum: 4f1786d6556bae6485a657a3e77e7e71b573fd20e4e2d70678e0f445eb8fe3dc6c4441cda6d18b89a79b53e2c03b6232eb6c470ecd478737050724ea09398603
- languageName: node
- linkType: hard
-
-"hast-util-to-html@npm:^8.0.0":
- version: 8.0.4
- resolution: "hast-util-to-html@npm:8.0.4"
- dependencies:
- "@types/hast": ^2.0.0
- "@types/unist": ^2.0.0
- ccount: ^2.0.0
- comma-separated-tokens: ^2.0.0
- hast-util-raw: ^7.0.0
- hast-util-whitespace: ^2.0.0
- html-void-elements: ^2.0.0
- property-information: ^6.0.0
- space-separated-tokens: ^2.0.0
- stringify-entities: ^4.0.0
- zwitch: ^2.0.4
- checksum: 8f2ae071df2ced5afb4f19f76af8fd3a2f837dc47bcc1c0e0c1578d29dafcd28738f9617505d13c4a2adf13d70e043143e2ad8f130d5554ab4fc11bfa8f74094
- languageName: node
- linkType: hard
-
-"hast-util-to-parse5@npm:^7.0.0":
- version: 7.1.0
- resolution: "hast-util-to-parse5@npm:7.1.0"
- dependencies:
- "@types/hast": ^2.0.0
- comma-separated-tokens: ^2.0.0
- property-information: ^6.0.0
- space-separated-tokens: ^2.0.0
- web-namespaces: ^2.0.0
- zwitch: ^2.0.0
- checksum: 3a7f2175a3db599bbae7e49ba73d3e5e688e5efca7590ff50130ba108ad649f728402815d47db49146f6b94c14c934bf119915da9f6964e38802c122bcc8af6b
- languageName: node
- linkType: hard
-
-"hast-util-whitespace@npm:^2.0.0":
- version: 2.0.1
- resolution: "hast-util-whitespace@npm:2.0.1"
- checksum: 431be6b2f35472f951615540d7a53f69f39461e5e080c0190268bdeb2be9ab9b1dddfd1f467dd26c1de7e7952df67beb1307b6ee940baf78b24a71b5e0663868
- languageName: node
- linkType: hard
-
-"hastscript@npm:^7.0.0":
- version: 7.2.0
- resolution: "hastscript@npm:7.2.0"
- dependencies:
- "@types/hast": ^2.0.0
- comma-separated-tokens: ^2.0.0
- hast-util-parse-selector: ^3.0.0
- property-information: ^6.0.0
- space-separated-tokens: ^2.0.0
- checksum: 928a21576ff7b9a8c945e7940bcbf2d27f770edb4279d4d04b33dc90753e26ca35c1172d626f54afebd377b2afa32331e399feb3eb0f7b91a399dca5927078ae
- languageName: node
- linkType: hard
-
-"he@npm:^1.2.0":
- version: 1.2.0
- resolution: "he@npm:1.2.0"
- bin:
- he: bin/he
- checksum: 3d4d6babccccd79c5c5a3f929a68af33360d6445587d628087f39a965079d84f18ce9c3d3f917ee1e3978916fc833bb8b29377c3b403f919426f91bc6965e7a7
- languageName: node
- linkType: hard
-
-"highlight.js@npm:^11.6.0":
- version: 11.9.0
- resolution: "highlight.js@npm:11.9.0"
- checksum: 4043d31c5de9d27d13387d9a9e5e1939557254b7b85f0fab85d9cae0e420e131a3456ebf6148552020a1d8a216d671d583f2433d6c4de6179b8a66487a8325cb
- languageName: node
- linkType: hard
-
-"hosted-git-info@npm:^4.0.1":
- version: 4.1.0
- resolution: "hosted-git-info@npm:4.1.0"
- dependencies:
- lru-cache: ^6.0.0
- checksum: c3f87b3c2f7eb8c2748c8f49c0c2517c9a95f35d26f4bf54b2a8cba05d2e668f3753548b6ea366b18ec8dadb4e12066e19fa382a01496b0ffa0497eb23cbe461
- languageName: node
- linkType: hard
-
-"html-escaper@npm:^2.0.0":
- version: 2.0.2
- resolution: "html-escaper@npm:2.0.2"
- checksum: d2df2da3ad40ca9ee3a39c5cc6475ef67c8f83c234475f24d8e9ce0dc80a2c82df8e1d6fa78ddd1e9022a586ea1bd247a615e80a5cd9273d90111ddda7d9e974
- languageName: node
- linkType: hard
-
-"html-void-elements@npm:^2.0.0":
- version: 2.0.1
- resolution: "html-void-elements@npm:2.0.1"
- checksum: 06d41f13b9d5d6e0f39861c4bec9a9196fa4906d56cd5cf6cf54ad2e52a85bf960cca2bf9600026bde16c8331db171bedba5e5a35e2e43630c8f1d497b2fb658
- languageName: node
- linkType: hard
-
-"http-cache-semantics@npm:^4.1.0, http-cache-semantics@npm:^4.1.1":
- version: 4.1.1
- resolution: "http-cache-semantics@npm:4.1.1"
- checksum: 83ac0bc60b17a3a36f9953e7be55e5c8f41acc61b22583060e8dedc9dd5e3607c823a88d0926f9150e571f90946835c7fe150732801010845c72cd8bbff1a236
- languageName: node
- linkType: hard
-
-"http-proxy-agent@npm:^5.0.0":
- version: 5.0.0
- resolution: "http-proxy-agent@npm:5.0.0"
- dependencies:
- "@tootallnate/once": 2
- agent-base: 6
- debug: 4
- checksum: e2ee1ff1656a131953839b2a19cd1f3a52d97c25ba87bd2559af6ae87114abf60971e498021f9b73f9fd78aea8876d1fb0d4656aac8a03c6caa9fc175f22b786
- languageName: node
- linkType: hard
-
-"http-proxy-agent@npm:^7.0.0":
- version: 7.0.0
- resolution: "http-proxy-agent@npm:7.0.0"
- dependencies:
- agent-base: ^7.1.0
- debug: ^4.3.4
- checksum: 48d4fac997917e15f45094852b63b62a46d0c8a4f0b9c6c23ca26d27b8df8d178bed88389e604745e748bd9a01f5023e25093722777f0593c3f052009ff438b6
- languageName: node
- linkType: hard
-
-"https-proxy-agent@npm:^5.0.0":
- version: 5.0.1
- resolution: "https-proxy-agent@npm:5.0.1"
- dependencies:
- agent-base: 6
- debug: 4
- checksum: 571fccdf38184f05943e12d37d6ce38197becdd69e58d03f43637f7fa1269cf303a7d228aa27e5b27bbd3af8f09fd938e1c91dcfefff2df7ba77c20ed8dfc765
- languageName: node
- linkType: hard
-
-"https-proxy-agent@npm:^7.0.1":
- version: 7.0.2
- resolution: "https-proxy-agent@npm:7.0.2"
- dependencies:
- agent-base: ^7.0.2
- debug: 4
- checksum: 088969a0dd476ea7a0ed0a2cf1283013682b08f874c3bc6696c83fa061d2c157d29ef0ad3eb70a2046010bb7665573b2388d10fdcb3e410a66995e5248444292
- languageName: node
- linkType: hard
-
-"human-signals@npm:^2.1.0":
- version: 2.1.0
- resolution: "human-signals@npm:2.1.0"
- checksum: b87fd89fce72391625271454e70f67fe405277415b48bcc0117ca73d31fa23a4241787afdc8d67f5a116cf37258c052f59ea82daffa72364d61351423848e3b8
- languageName: node
- linkType: hard
-
-"humanize-ms@npm:^1.2.1":
- version: 1.2.1
- resolution: "humanize-ms@npm:1.2.1"
- dependencies:
- ms: ^2.0.0
- checksum: 9c7a74a2827f9294c009266c82031030eae811ca87b0da3dceb8d6071b9bde22c9f3daef0469c3c533cc67a97d8a167cd9fc0389350e5f415f61a79b171ded16
- languageName: node
- linkType: hard
-
-"iconv-lite@npm:^0.6.2":
- version: 0.6.3
- resolution: "iconv-lite@npm:0.6.3"
- dependencies:
- safer-buffer: ">= 2.1.2 < 3.0.0"
- checksum: 3f60d47a5c8fc3313317edfd29a00a692cc87a19cac0159e2ce711d0ebc9019064108323b5e493625e25594f11c6236647d8e256fbe7a58f4a3b33b89e6d30bf
- languageName: node
- linkType: hard
-
-"ieee754@npm:^1.1.13":
- version: 1.2.1
- resolution: "ieee754@npm:1.2.1"
- checksum: 5144c0c9815e54ada181d80a0b810221a253562422e7c6c3a60b1901154184f49326ec239d618c416c1c5945a2e197107aee8d986a3dd836b53dffefd99b5e7e
- languageName: node
- linkType: hard
-
-"import-local@npm:^3.0.2":
- version: 3.1.0
- resolution: "import-local@npm:3.1.0"
- dependencies:
- pkg-dir: ^4.2.0
- resolve-cwd: ^3.0.0
- bin:
- import-local-fixture: fixtures/cli.js
- checksum: bfcdb63b5e3c0e245e347f3107564035b128a414c4da1172a20dc67db2504e05ede4ac2eee1252359f78b0bfd7b19ef180aec427c2fce6493ae782d73a04cddd
- languageName: node
- linkType: hard
-
-"imurmurhash@npm:^0.1.4":
- version: 0.1.4
- resolution: "imurmurhash@npm:0.1.4"
- checksum: 7cae75c8cd9a50f57dadd77482359f659eaebac0319dd9368bcd1714f55e65badd6929ca58569da2b6494ef13fdd5598cd700b1eba23f8b79c5f19d195a3ecf7
- languageName: node
- linkType: hard
-
-"indent-string@npm:^4.0.0":
- version: 4.0.0
- resolution: "indent-string@npm:4.0.0"
- checksum: 824cfb9929d031dabf059bebfe08cf3137365e112019086ed3dcff6a0a7b698cb80cf67ccccde0e25b9e2d7527aa6cc1fed1ac490c752162496caba3e6699612
- languageName: node
- linkType: hard
-
-"infer-owner@npm:^1.0.4":
- version: 1.0.4
- resolution: "infer-owner@npm:1.0.4"
- checksum: 181e732764e4a0611576466b4b87dac338972b839920b2a8cde43642e4ed6bd54dc1fb0b40874728f2a2df9a1b097b8ff83b56d5f8f8e3927f837fdcb47d8a89
- languageName: node
- linkType: hard
-
-"inflight@npm:^1.0.4":
- version: 1.0.6
- resolution: "inflight@npm:1.0.6"
- dependencies:
- once: ^1.3.0
- wrappy: 1
- checksum: f4f76aa072ce19fae87ce1ef7d221e709afb59d445e05d47fba710e85470923a75de35bfae47da6de1b18afc3ce83d70facf44cfb0aff89f0a3f45c0a0244dfd
- languageName: node
- linkType: hard
-
-"inherits@npm:2, inherits@npm:^2.0.3, inherits@npm:^2.0.4":
- version: 2.0.4
- resolution: "inherits@npm:2.0.4"
- checksum: 4a48a733847879d6cf6691860a6b1e3f0f4754176e4d71494c41f3475553768b10f84b5ce1d40fbd0e34e6bfbb864ee35858ad4dd2cf31e02fc4a154b724d7f1
- languageName: node
- linkType: hard
-
-"ini@npm:^3.0.0":
- version: 3.0.1
- resolution: "ini@npm:3.0.1"
- checksum: 947b582a822f06df3c22c75c90aec217d604ea11f7a20249530ee5c1cf8f508288439abe17b0e1d9b421bda5f4fae5e7aae0b18cb3ded5ac9d68f607df82f10f
- languageName: node
- linkType: hard
-
-"ip@npm:^2.0.0":
- version: 2.0.1
- resolution: "ip@npm:2.0.1"
- checksum: d765c9fd212b8a99023a4cde6a558a054c298d640fec1020567494d257afd78ca77e37126b1a3ef0e053646ced79a816bf50621d38d5e768cdde0431fa3b0d35
- languageName: node
- linkType: hard
-
-"is-absolute@npm:^1.0.0":
- version: 1.0.0
- resolution: "is-absolute@npm:1.0.0"
- dependencies:
- is-relative: ^1.0.0
- is-windows: ^1.0.1
- checksum: 9d16b2605eda3f3ce755410f1d423e327ad3a898bcb86c9354cf63970ed3f91ba85e9828aa56f5d6a952b9fae43d0477770f78d37409ae8ecc31e59ebc279b27
- languageName: node
- linkType: hard
-
-"is-arrayish@npm:^0.2.1":
- version: 0.2.1
- resolution: "is-arrayish@npm:0.2.1"
- checksum: eef4417e3c10e60e2c810b6084942b3ead455af16c4509959a27e490e7aee87cfb3f38e01bbde92220b528a0ee1a18d52b787e1458ee86174d8c7f0e58cd488f
- languageName: node
- linkType: hard
-
-"is-binary-path@npm:~2.1.0":
- version: 2.1.0
- resolution: "is-binary-path@npm:2.1.0"
- dependencies:
- binary-extensions: ^2.0.0
- checksum: 84192eb88cff70d320426f35ecd63c3d6d495da9d805b19bc65b518984b7c0760280e57dbf119b7e9be6b161784a5a673ab2c6abe83abb5198a432232ad5b35c
- languageName: node
- linkType: hard
-
-"is-buffer@npm:^2.0.0":
- version: 2.0.5
- resolution: "is-buffer@npm:2.0.5"
- checksum: 764c9ad8b523a9f5a32af29bdf772b08eb48c04d2ad0a7240916ac2688c983bf5f8504bf25b35e66240edeb9d9085461f9b5dae1f3d2861c6b06a65fe983de42
- languageName: node
- linkType: hard
-
-"is-core-module@npm:^2.13.0, is-core-module@npm:^2.5.0":
- version: 2.13.1
- resolution: "is-core-module@npm:2.13.1"
- dependencies:
- hasown: ^2.0.0
- checksum: 256559ee8a9488af90e4bad16f5583c6d59e92f0742e9e8bb4331e758521ee86b810b93bae44f390766ffbc518a0488b18d9dab7da9a5ff997d499efc9403f7c
- languageName: node
- linkType: hard
-
-"is-extglob@npm:^2.1.1":
- version: 2.1.1
- resolution: "is-extglob@npm:2.1.1"
- checksum: df033653d06d0eb567461e58a7a8c9f940bd8c22274b94bf7671ab36df5719791aae15eef6d83bbb5e23283967f2f984b8914559d4449efda578c775c4be6f85
- languageName: node
- linkType: hard
-
-"is-fullwidth-code-point@npm:^3.0.0":
- version: 3.0.0
- resolution: "is-fullwidth-code-point@npm:3.0.0"
- checksum: 44a30c29457c7fb8f00297bce733f0a64cd22eca270f83e58c105e0d015e45c019491a4ab2faef91ab51d4738c670daff901c799f6a700e27f7314029e99e348
- languageName: node
- linkType: hard
-
-"is-generator-fn@npm:^2.0.0":
- version: 2.1.0
- resolution: "is-generator-fn@npm:2.1.0"
- checksum: a6ad5492cf9d1746f73b6744e0c43c0020510b59d56ddcb78a91cbc173f09b5e6beff53d75c9c5a29feb618bfef2bf458e025ecf3a57ad2268e2fb2569f56215
- languageName: node
- linkType: hard
-
-"is-glob@npm:^4.0.1, is-glob@npm:~4.0.1":
- version: 4.0.3
- resolution: "is-glob@npm:4.0.3"
- dependencies:
- is-extglob: ^2.1.1
- checksum: d381c1319fcb69d341cc6e6c7cd588e17cd94722d9a32dbd60660b993c4fb7d0f19438674e68dfec686d09b7c73139c9166b47597f846af387450224a8101ab4
- languageName: node
- linkType: hard
-
-"is-lambda@npm:^1.0.1":
- version: 1.0.1
- resolution: "is-lambda@npm:1.0.1"
- checksum: 93a32f01940220532e5948538699ad610d5924ac86093fcee83022252b363eb0cc99ba53ab084a04e4fb62bf7b5731f55496257a4c38adf87af9c4d352c71c35
- languageName: node
- linkType: hard
-
-"is-number@npm:^7.0.0":
- version: 7.0.0
- resolution: "is-number@npm:7.0.0"
- checksum: 456ac6f8e0f3111ed34668a624e45315201dff921e5ac181f8ec24923b99e9f32ca1a194912dc79d539c97d33dba17dc635202ff0b2cf98326f608323276d27a
- languageName: node
- linkType: hard
-
-"is-plain-obj@npm:^4.0.0":
- version: 4.1.0
- resolution: "is-plain-obj@npm:4.1.0"
- checksum: 6dc45da70d04a81f35c9310971e78a6a3c7a63547ef782e3a07ee3674695081b6ca4e977fbb8efc48dae3375e0b34558d2bcd722aec9bddfa2d7db5b041be8ce
- languageName: node
- linkType: hard
-
-"is-relative@npm:^1.0.0":
- version: 1.0.0
- resolution: "is-relative@npm:1.0.0"
- dependencies:
- is-unc-path: ^1.0.0
- checksum: 3271a0df109302ef5e14a29dcd5d23d9788e15ade91a40b942b035827ffbb59f7ce9ff82d036ea798541a52913cbf9d2d0b66456340887b51f3542d57b5a4c05
- languageName: node
- linkType: hard
-
-"is-ssh@npm:^1.4.0":
- version: 1.4.0
- resolution: "is-ssh@npm:1.4.0"
- dependencies:
- protocols: ^2.0.1
- checksum: 75eaa17b538bee24b661fbeb0f140226ac77e904a6039f787bea418431e2162f1f9c4c4ccad3bd169e036cd701cc631406e8c505d9fa7e20164e74b47f86f40f
- languageName: node
- linkType: hard
-
-"is-stream@npm:^2.0.0":
- version: 2.0.1
- resolution: "is-stream@npm:2.0.1"
- checksum: b8e05ccdf96ac330ea83c12450304d4a591f9958c11fd17bed240af8d5ffe08aedafa4c0f4cfccd4d28dc9d4d129daca1023633d5c11601a6cbc77521f6fae66
- languageName: node
- linkType: hard
-
-"is-unc-path@npm:^1.0.0":
- version: 1.0.0
- resolution: "is-unc-path@npm:1.0.0"
- dependencies:
- unc-path-regex: ^0.1.2
- checksum: e8abfde203f7409f5b03a5f1f8636e3a41e78b983702ef49d9343eb608cdfe691429398e8815157519b987b739bcfbc73ae7cf4c8582b0ab66add5171088eab6
- languageName: node
- linkType: hard
-
-"is-windows@npm:^1.0.1":
- version: 1.0.2
- resolution: "is-windows@npm:1.0.2"
- checksum: 438b7e52656fe3b9b293b180defb4e448088e7023a523ec21a91a80b9ff8cdb3377ddb5b6e60f7c7de4fa8b63ab56e121b6705fe081b3cf1b828b0a380009ad7
- languageName: node
- linkType: hard
-
-"isexe@npm:^2.0.0":
- version: 2.0.0
- resolution: "isexe@npm:2.0.0"
- checksum: 26bf6c5480dda5161c820c5b5c751ae1e766c587b1f951ea3fcfc973bafb7831ae5b54a31a69bd670220e42e99ec154475025a468eae58ea262f813fdc8d1c62
- languageName: node
- linkType: hard
-
-"isexe@npm:^3.1.1":
- version: 3.1.1
- resolution: "isexe@npm:3.1.1"
- checksum: 7fe1931ee4e88eb5aa524cd3ceb8c882537bc3a81b02e438b240e47012eef49c86904d0f0e593ea7c3a9996d18d0f1f3be8d3eaa92333977b0c3a9d353d5563e
- languageName: node
- linkType: hard
-
-"istanbul-lib-coverage@npm:^3.0.0, istanbul-lib-coverage@npm:^3.2.0":
- version: 3.2.2
- resolution: "istanbul-lib-coverage@npm:3.2.2"
- checksum: 2367407a8d13982d8f7a859a35e7f8dd5d8f75aae4bb5484ede3a9ea1b426dc245aff28b976a2af48ee759fdd9be374ce2bd2669b644f31e76c5f46a2e29a831
- languageName: node
- linkType: hard
-
-"istanbul-lib-instrument@npm:^5.0.4":
- version: 5.2.1
- resolution: "istanbul-lib-instrument@npm:5.2.1"
- dependencies:
- "@babel/core": ^7.12.3
- "@babel/parser": ^7.14.7
- "@istanbuljs/schema": ^0.1.2
- istanbul-lib-coverage: ^3.2.0
- semver: ^6.3.0
- checksum: bf16f1803ba5e51b28bbd49ed955a736488381e09375d830e42ddeb403855b2006f850711d95ad726f2ba3f1ae8e7366de7e51d2b9ac67dc4d80191ef7ddf272
- languageName: node
- linkType: hard
-
-"istanbul-lib-instrument@npm:^6.0.0":
- version: 6.0.1
- resolution: "istanbul-lib-instrument@npm:6.0.1"
- dependencies:
- "@babel/core": ^7.12.3
- "@babel/parser": ^7.14.7
- "@istanbuljs/schema": ^0.1.2
- istanbul-lib-coverage: ^3.2.0
- semver: ^7.5.4
- checksum: fb23472e739cfc9b027cefcd7d551d5e7ca7ff2817ae5150fab99fe42786a7f7b56a29a2aa8309c37092e18297b8003f9c274f50ca4360949094d17fbac81472
- languageName: node
- linkType: hard
-
-"istanbul-lib-report@npm:^3.0.0":
- version: 3.0.1
- resolution: "istanbul-lib-report@npm:3.0.1"
- dependencies:
- istanbul-lib-coverage: ^3.0.0
- make-dir: ^4.0.0
- supports-color: ^7.1.0
- checksum: fd17a1b879e7faf9bb1dc8f80b2a16e9f5b7b8498fe6ed580a618c34df0bfe53d2abd35bf8a0a00e628fb7405462576427c7df20bbe4148d19c14b431c974b21
- languageName: node
- linkType: hard
-
-"istanbul-lib-source-maps@npm:^4.0.0":
- version: 4.0.1
- resolution: "istanbul-lib-source-maps@npm:4.0.1"
- dependencies:
- debug: ^4.1.1
- istanbul-lib-coverage: ^3.0.0
- source-map: ^0.6.1
- checksum: 21ad3df45db4b81852b662b8d4161f6446cd250c1ddc70ef96a585e2e85c26ed7cd9c2a396a71533cfb981d1a645508bc9618cae431e55d01a0628e7dec62ef2
- languageName: node
- linkType: hard
-
-"istanbul-reports@npm:^3.1.3":
- version: 3.1.6
- resolution: "istanbul-reports@npm:3.1.6"
- dependencies:
- html-escaper: ^2.0.0
- istanbul-lib-report: ^3.0.0
- checksum: 44c4c0582f287f02341e9720997f9e82c071627e1e862895745d5f52ec72c9b9f38e1d12370015d2a71dcead794f34c7732aaef3fab80a24bc617a21c3d911d6
- languageName: node
- linkType: hard
-
-"jackspeak@npm:^2.3.5":
- version: 2.3.6
- resolution: "jackspeak@npm:2.3.6"
- dependencies:
- "@isaacs/cliui": ^8.0.2
- "@pkgjs/parseargs": ^0.11.0
- dependenciesMeta:
- "@pkgjs/parseargs":
- optional: true
- checksum: 57d43ad11eadc98cdfe7496612f6bbb5255ea69fe51ea431162db302c2a11011642f50cfad57288bd0aea78384a0612b16e131944ad8ecd09d619041c8531b54
- languageName: node
- linkType: hard
-
-"jest-changed-files@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-changed-files@npm:29.7.0"
- dependencies:
- execa: ^5.0.0
- jest-util: ^29.7.0
- p-limit: ^3.1.0
- checksum: 963e203893c396c5dfc75e00a49426688efea7361b0f0e040035809cecd2d46b3c01c02be2d9e8d38b1138357d2de7719ea5b5be21f66c10f2e9685a5a73bb99
- languageName: node
- linkType: hard
-
-"jest-circus@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-circus@npm:29.7.0"
- dependencies:
- "@jest/environment": ^29.7.0
- "@jest/expect": ^29.7.0
- "@jest/test-result": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/node": "*"
- chalk: ^4.0.0
- co: ^4.6.0
- dedent: ^1.0.0
- is-generator-fn: ^2.0.0
- jest-each: ^29.7.0
- jest-matcher-utils: ^29.7.0
- jest-message-util: ^29.7.0
- jest-runtime: ^29.7.0
- jest-snapshot: ^29.7.0
- jest-util: ^29.7.0
- p-limit: ^3.1.0
- pretty-format: ^29.7.0
- pure-rand: ^6.0.0
- slash: ^3.0.0
- stack-utils: ^2.0.3
- checksum: 349437148924a5a109c9b8aad6d393a9591b4dac1918fc97d81b7fc515bc905af9918495055071404af1fab4e48e4b04ac3593477b1d5dcf48c4e71b527c70a7
- languageName: node
- linkType: hard
-
-"jest-cli@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-cli@npm:29.7.0"
- dependencies:
- "@jest/core": ^29.7.0
- "@jest/test-result": ^29.7.0
- "@jest/types": ^29.6.3
- chalk: ^4.0.0
- create-jest: ^29.7.0
- exit: ^0.1.2
- import-local: ^3.0.2
- jest-config: ^29.7.0
- jest-util: ^29.7.0
- jest-validate: ^29.7.0
- yargs: ^17.3.1
- peerDependencies:
- node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0
- peerDependenciesMeta:
- node-notifier:
- optional: true
- bin:
- jest: bin/jest.js
- checksum: 664901277a3f5007ea4870632ed6e7889db9da35b2434e7cb488443e6bf5513889b344b7fddf15112135495b9875892b156faeb2d7391ddb9e2a849dcb7b6c36
- languageName: node
- linkType: hard
-
-"jest-config@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-config@npm:29.7.0"
- dependencies:
- "@babel/core": ^7.11.6
- "@jest/test-sequencer": ^29.7.0
- "@jest/types": ^29.6.3
- babel-jest: ^29.7.0
- chalk: ^4.0.0
- ci-info: ^3.2.0
- deepmerge: ^4.2.2
- glob: ^7.1.3
- graceful-fs: ^4.2.9
- jest-circus: ^29.7.0
- jest-environment-node: ^29.7.0
- jest-get-type: ^29.6.3
- jest-regex-util: ^29.6.3
- jest-resolve: ^29.7.0
- jest-runner: ^29.7.0
- jest-util: ^29.7.0
- jest-validate: ^29.7.0
- micromatch: ^4.0.4
- parse-json: ^5.2.0
- pretty-format: ^29.7.0
- slash: ^3.0.0
- strip-json-comments: ^3.1.1
- peerDependencies:
- "@types/node": "*"
- ts-node: ">=9.0.0"
- peerDependenciesMeta:
- "@types/node":
- optional: true
- ts-node:
- optional: true
- checksum: 4cabf8f894c180cac80b7df1038912a3fc88f96f2622de33832f4b3314f83e22b08fb751da570c0ab2b7988f21604bdabade95e3c0c041068ac578c085cf7dff
- languageName: node
- linkType: hard
-
-"jest-diff@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-diff@npm:29.7.0"
- dependencies:
- chalk: ^4.0.0
- diff-sequences: ^29.6.3
- jest-get-type: ^29.6.3
- pretty-format: ^29.7.0
- checksum: 08e24a9dd43bfba1ef07a6374e5af138f53137b79ec3d5cc71a2303515335898888fa5409959172e1e05de966c9e714368d15e8994b0af7441f0721ee8e1bb77
- languageName: node
- linkType: hard
-
-"jest-docblock@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-docblock@npm:29.7.0"
- dependencies:
- detect-newline: ^3.0.0
- checksum: 66390c3e9451f8d96c5da62f577a1dad701180cfa9b071c5025acab2f94d7a3efc2515cfa1654ebe707213241541ce9c5530232cdc8017c91ed64eea1bd3b192
- languageName: node
- linkType: hard
-
-"jest-each@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-each@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- chalk: ^4.0.0
- jest-get-type: ^29.6.3
- jest-util: ^29.7.0
- pretty-format: ^29.7.0
- checksum: e88f99f0184000fc8813f2a0aa79e29deeb63700a3b9b7928b8a418d7d93cd24933608591dbbdea732b473eb2021c72991b5cc51a17966842841c6e28e6f691c
- languageName: node
- linkType: hard
-
-"jest-environment-node@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-environment-node@npm:29.7.0"
- dependencies:
- "@jest/environment": ^29.7.0
- "@jest/fake-timers": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/node": "*"
- jest-mock: ^29.7.0
- jest-util: ^29.7.0
- checksum: 501a9966292cbe0ca3f40057a37587cb6def25e1e0c5e39ac6c650fe78d3c70a2428304341d084ac0cced5041483acef41c477abac47e9a290d5545fd2f15646
- languageName: node
- linkType: hard
-
-"jest-get-type@npm:^29.6.3":
- version: 29.6.3
- resolution: "jest-get-type@npm:29.6.3"
- checksum: 88ac9102d4679d768accae29f1e75f592b760b44277df288ad76ce5bf038c3f5ce3719dea8aa0f035dac30e9eb034b848ce716b9183ad7cc222d029f03e92205
- languageName: node
- linkType: hard
-
-"jest-haste-map@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-haste-map@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- "@types/graceful-fs": ^4.1.3
- "@types/node": "*"
- anymatch: ^3.0.3
- fb-watchman: ^2.0.0
- fsevents: ^2.3.2
- graceful-fs: ^4.2.9
- jest-regex-util: ^29.6.3
- jest-util: ^29.7.0
- jest-worker: ^29.7.0
- micromatch: ^4.0.4
- walker: ^1.0.8
- dependenciesMeta:
- fsevents:
- optional: true
- checksum: c2c8f2d3e792a963940fbdfa563ce14ef9e14d4d86da645b96d3cd346b8d35c5ce0b992ee08593939b5f718cf0a1f5a90011a056548a1dbf58397d4356786f01
- languageName: node
- linkType: hard
-
-"jest-leak-detector@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-leak-detector@npm:29.7.0"
- dependencies:
- jest-get-type: ^29.6.3
- pretty-format: ^29.7.0
- checksum: e3950e3ddd71e1d0c22924c51a300a1c2db6cf69ec1e51f95ccf424bcc070f78664813bef7aed4b16b96dfbdeea53fe358f8aeaaea84346ae15c3735758f1605
- languageName: node
- linkType: hard
-
-"jest-matcher-utils@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-matcher-utils@npm:29.7.0"
- dependencies:
- chalk: ^4.0.0
- jest-diff: ^29.7.0
- jest-get-type: ^29.6.3
- pretty-format: ^29.7.0
- checksum: d7259e5f995d915e8a37a8fd494cb7d6af24cd2a287b200f831717ba0d015190375f9f5dc35393b8ba2aae9b2ebd60984635269c7f8cff7d85b077543b7744cd
- languageName: node
- linkType: hard
-
-"jest-message-util@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-message-util@npm:29.7.0"
- dependencies:
- "@babel/code-frame": ^7.12.13
- "@jest/types": ^29.6.3
- "@types/stack-utils": ^2.0.0
- chalk: ^4.0.0
- graceful-fs: ^4.2.9
- micromatch: ^4.0.4
- pretty-format: ^29.7.0
- slash: ^3.0.0
- stack-utils: ^2.0.3
- checksum: a9d025b1c6726a2ff17d54cc694de088b0489456c69106be6b615db7a51b7beb66788bea7a59991a019d924fbf20f67d085a445aedb9a4d6760363f4d7d09930
- languageName: node
- linkType: hard
-
-"jest-mock@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-mock@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- "@types/node": "*"
- jest-util: ^29.7.0
- checksum: 81ba9b68689a60be1482212878973700347cb72833c5e5af09895882b9eb5c4e02843a1bbdf23f94c52d42708bab53a30c45a3482952c9eec173d1eaac5b86c5
- languageName: node
- linkType: hard
-
-"jest-pnp-resolver@npm:^1.2.2":
- version: 1.2.3
- resolution: "jest-pnp-resolver@npm:1.2.3"
- peerDependencies:
- jest-resolve: "*"
- peerDependenciesMeta:
- jest-resolve:
- optional: true
- checksum: db1a8ab2cb97ca19c01b1cfa9a9c8c69a143fde833c14df1fab0766f411b1148ff0df878adea09007ac6a2085ec116ba9a996a6ad104b1e58c20adbf88eed9b2
- languageName: node
- linkType: hard
-
-"jest-regex-util@npm:^29.6.3":
- version: 29.6.3
- resolution: "jest-regex-util@npm:29.6.3"
- checksum: 0518beeb9bf1228261695e54f0feaad3606df26a19764bc19541e0fc6e2a3737191904607fb72f3f2ce85d9c16b28df79b7b1ec9443aa08c3ef0e9efda6f8f2a
- languageName: node
- linkType: hard
-
-"jest-resolve-dependencies@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-resolve-dependencies@npm:29.7.0"
- dependencies:
- jest-regex-util: ^29.6.3
- jest-snapshot: ^29.7.0
- checksum: aeb75d8150aaae60ca2bb345a0d198f23496494677cd6aefa26fc005faf354061f073982175daaf32b4b9d86b26ca928586344516e3e6969aa614cb13b883984
- languageName: node
- linkType: hard
-
-"jest-resolve@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-resolve@npm:29.7.0"
- dependencies:
- chalk: ^4.0.0
- graceful-fs: ^4.2.9
- jest-haste-map: ^29.7.0
- jest-pnp-resolver: ^1.2.2
- jest-util: ^29.7.0
- jest-validate: ^29.7.0
- resolve: ^1.20.0
- resolve.exports: ^2.0.0
- slash: ^3.0.0
- checksum: 0ca218e10731aa17920526ec39deaec59ab9b966237905ffc4545444481112cd422f01581230eceb7e82d86f44a543d520a71391ec66e1b4ef1a578bd5c73487
- languageName: node
- linkType: hard
-
-"jest-runner@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-runner@npm:29.7.0"
- dependencies:
- "@jest/console": ^29.7.0
- "@jest/environment": ^29.7.0
- "@jest/test-result": ^29.7.0
- "@jest/transform": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/node": "*"
- chalk: ^4.0.0
- emittery: ^0.13.1
- graceful-fs: ^4.2.9
- jest-docblock: ^29.7.0
- jest-environment-node: ^29.7.0
- jest-haste-map: ^29.7.0
- jest-leak-detector: ^29.7.0
- jest-message-util: ^29.7.0
- jest-resolve: ^29.7.0
- jest-runtime: ^29.7.0
- jest-util: ^29.7.0
- jest-watcher: ^29.7.0
- jest-worker: ^29.7.0
- p-limit: ^3.1.0
- source-map-support: 0.5.13
- checksum: f0405778ea64812bf9b5c50b598850d94ccf95d7ba21f090c64827b41decd680ee19fcbb494007cdd7f5d0d8906bfc9eceddd8fa583e753e736ecd462d4682fb
- languageName: node
- linkType: hard
-
-"jest-runtime@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-runtime@npm:29.7.0"
- dependencies:
- "@jest/environment": ^29.7.0
- "@jest/fake-timers": ^29.7.0
- "@jest/globals": ^29.7.0
- "@jest/source-map": ^29.6.3
- "@jest/test-result": ^29.7.0
- "@jest/transform": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/node": "*"
- chalk: ^4.0.0
- cjs-module-lexer: ^1.0.0
- collect-v8-coverage: ^1.0.0
- glob: ^7.1.3
- graceful-fs: ^4.2.9
- jest-haste-map: ^29.7.0
- jest-message-util: ^29.7.0
- jest-mock: ^29.7.0
- jest-regex-util: ^29.6.3
- jest-resolve: ^29.7.0
- jest-snapshot: ^29.7.0
- jest-util: ^29.7.0
- slash: ^3.0.0
- strip-bom: ^4.0.0
- checksum: d19f113d013e80691e07047f68e1e3448ef024ff2c6b586ce4f90cd7d4c62a2cd1d460110491019719f3c59bfebe16f0e201ed005ef9f80e2cf798c374eed54e
- languageName: node
- linkType: hard
-
-"jest-snapshot@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-snapshot@npm:29.7.0"
- dependencies:
- "@babel/core": ^7.11.6
- "@babel/generator": ^7.7.2
- "@babel/plugin-syntax-jsx": ^7.7.2
- "@babel/plugin-syntax-typescript": ^7.7.2
- "@babel/types": ^7.3.3
- "@jest/expect-utils": ^29.7.0
- "@jest/transform": ^29.7.0
- "@jest/types": ^29.6.3
- babel-preset-current-node-syntax: ^1.0.0
- chalk: ^4.0.0
- expect: ^29.7.0
- graceful-fs: ^4.2.9
- jest-diff: ^29.7.0
- jest-get-type: ^29.6.3
- jest-matcher-utils: ^29.7.0
- jest-message-util: ^29.7.0
- jest-util: ^29.7.0
- natural-compare: ^1.4.0
- pretty-format: ^29.7.0
- semver: ^7.5.3
- checksum: 86821c3ad0b6899521ce75ee1ae7b01b17e6dfeff9166f2cf17f012e0c5d8c798f30f9e4f8f7f5bed01ea7b55a6bc159f5eda778311162cbfa48785447c237ad
- languageName: node
- linkType: hard
-
-"jest-util@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-util@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- "@types/node": "*"
- chalk: ^4.0.0
- ci-info: ^3.2.0
- graceful-fs: ^4.2.9
- picomatch: ^2.2.3
- checksum: 042ab4980f4ccd4d50226e01e5c7376a8556b472442ca6091a8f102488c0f22e6e8b89ea874111d2328a2080083bf3225c86f3788c52af0bd0345a00eb57a3ca
- languageName: node
- linkType: hard
-
-"jest-validate@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-validate@npm:29.7.0"
- dependencies:
- "@jest/types": ^29.6.3
- camelcase: ^6.2.0
- chalk: ^4.0.0
- jest-get-type: ^29.6.3
- leven: ^3.1.0
- pretty-format: ^29.7.0
- checksum: 191fcdc980f8a0de4dbdd879fa276435d00eb157a48683af7b3b1b98b0f7d9de7ffe12689b617779097ff1ed77601b9f7126b0871bba4f776e222c40f62e9dae
- languageName: node
- linkType: hard
-
-"jest-watcher@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-watcher@npm:29.7.0"
- dependencies:
- "@jest/test-result": ^29.7.0
- "@jest/types": ^29.6.3
- "@types/node": "*"
- ansi-escapes: ^4.2.1
- chalk: ^4.0.0
- emittery: ^0.13.1
- jest-util: ^29.7.0
- string-length: ^4.0.1
- checksum: 67e6e7fe695416deff96b93a14a561a6db69389a0667e9489f24485bb85e5b54e12f3b2ba511ec0b777eca1e727235b073e3ebcdd473d68888650489f88df92f
- languageName: node
- linkType: hard
-
-"jest-worker@npm:^29.7.0":
- version: 29.7.0
- resolution: "jest-worker@npm:29.7.0"
- dependencies:
- "@types/node": "*"
- jest-util: ^29.7.0
- merge-stream: ^2.0.0
- supports-color: ^8.0.0
- checksum: 30fff60af49675273644d408b650fc2eb4b5dcafc5a0a455f238322a8f9d8a98d847baca9d51ff197b6747f54c7901daa2287799230b856a0f48287d131f8c13
- languageName: node
- linkType: hard
-
-"jest@npm:^29.5.0":
- version: 29.7.0
- resolution: "jest@npm:29.7.0"
- dependencies:
- "@jest/core": ^29.7.0
- "@jest/types": ^29.6.3
- import-local: ^3.0.2
- jest-cli: ^29.7.0
- peerDependencies:
- node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0
- peerDependenciesMeta:
- node-notifier:
- optional: true
- bin:
- jest: bin/jest.js
- checksum: 17ca8d67504a7dbb1998cf3c3077ec9031ba3eb512da8d71cb91bcabb2b8995c4e4b292b740cb9bf1cbff5ce3e110b3f7c777b0cefb6f41ab05445f248d0ee0b
- languageName: node
- linkType: hard
-
-"js-tokens@npm:^4.0.0":
- version: 4.0.0
- resolution: "js-tokens@npm:4.0.0"
- checksum: 8a95213a5a77deb6cbe94d86340e8d9ace2b93bc367790b260101d2f36a2eaf4e4e22d9fa9cf459b38af3a32fb4190e638024cf82ec95ef708680e405ea7cc78
- languageName: node
- linkType: hard
-
-"js-yaml@npm:^3.13.1":
- version: 3.14.1
- resolution: "js-yaml@npm:3.14.1"
- dependencies:
- argparse: ^1.0.7
- esprima: ^4.0.0
- bin:
- js-yaml: bin/js-yaml.js
- checksum: bef146085f472d44dee30ec34e5cf36bf89164f5d585435a3d3da89e52622dff0b188a580e4ad091c3341889e14cb88cac6e4deb16dc5b1e9623bb0601fc255c
- languageName: node
- linkType: hard
-
-"js-yaml@npm:^4.1.0":
- version: 4.1.0
- resolution: "js-yaml@npm:4.1.0"
- dependencies:
- argparse: ^2.0.1
- bin:
- js-yaml: bin/js-yaml.js
- checksum: c7830dfd456c3ef2c6e355cc5a92e6700ceafa1d14bba54497b34a99f0376cecbb3e9ac14d3e5849b426d5a5140709a66237a8c991c675431271c4ce5504151a
- languageName: node
- linkType: hard
-
-"jsesc@npm:^2.5.1":
- version: 2.5.2
- resolution: "jsesc@npm:2.5.2"
- bin:
- jsesc: bin/jsesc
- checksum: 4dc190771129e12023f729ce20e1e0bfceac84d73a85bc3119f7f938843fe25a4aeccb54b6494dce26fcf263d815f5f31acdefac7cc9329efb8422a4f4d9fa9d
- languageName: node
- linkType: hard
-
-"json-parse-even-better-errors@npm:^2.3.0":
- version: 2.3.1
- resolution: "json-parse-even-better-errors@npm:2.3.1"
- checksum: 798ed4cf3354a2d9ccd78e86d2169515a0097a5c133337807cdf7f1fc32e1391d207ccfc276518cc1d7d8d4db93288b8a50ba4293d212ad1336e52a8ec0a941f
- languageName: node
- linkType: hard
-
-"json5@npm:^2.2.3":
- version: 2.2.3
- resolution: "json5@npm:2.2.3"
- bin:
- json5: lib/cli.js
- checksum: 2a7436a93393830bce797d4626275152e37e877b265e94ca69c99e3d20c2b9dab021279146a39cdb700e71b2dd32a4cebd1514cd57cee102b1af906ce5040349
- languageName: node
- linkType: hard
-
-"kleur@npm:^3.0.3":
- version: 3.0.3
- resolution: "kleur@npm:3.0.3"
- checksum: df82cd1e172f957bae9c536286265a5cdbd5eeca487cb0a3b2a7b41ef959fc61f8e7c0e9aeea9c114ccf2c166b6a8dd45a46fd619c1c569d210ecd2765ad5169
- languageName: node
- linkType: hard
-
-"kleur@npm:^4.0.3":
- version: 4.1.5
- resolution: "kleur@npm:4.1.5"
- checksum: 1dc476e32741acf0b1b5b0627ffd0d722e342c1b0da14de3e8ae97821327ca08f9fb944542fb3c126d90ac5f27f9d804edbe7c585bf7d12ef495d115e0f22c12
- languageName: node
- linkType: hard
-
-"konan@npm:^2.1.1":
- version: 2.1.1
- resolution: "konan@npm:2.1.1"
- dependencies:
- "@babel/parser": ^7.10.5
- "@babel/traverse": ^7.10.5
- checksum: 2e1eaaa563ce856f27572b5ac5ed7f1c45e40cef8fe31b79dc210b90f30c41dac1453466a6717f0954965d7e5e0c4beda37b564bffc91d54cfdac253ad36942a
- languageName: node
- linkType: hard
-
-"leven@npm:^3.1.0":
- version: 3.1.0
- resolution: "leven@npm:3.1.0"
- checksum: 638401d534585261b6003db9d99afd244dfe82d75ddb6db5c0df412842d5ab30b2ef18de471aaec70fe69a46f17b4ae3c7f01d8a4e6580ef7adb9f4273ad1e55
- languageName: node
- linkType: hard
-
-"lines-and-columns@npm:^1.1.6":
- version: 1.2.4
- resolution: "lines-and-columns@npm:1.2.4"
- checksum: 0c37f9f7fa212b38912b7145e1cd16a5f3cd34d782441c3e6ca653485d326f58b3caccda66efce1c5812bde4961bbde3374fae4b0d11bf1226152337f3894aa5
- languageName: node
- linkType: hard
-
-"locate-path@npm:^5.0.0":
- version: 5.0.0
- resolution: "locate-path@npm:5.0.0"
- dependencies:
- p-locate: ^4.1.0
- checksum: 83e51725e67517287d73e1ded92b28602e3ae5580b301fe54bfb76c0c723e3f285b19252e375712316774cf52006cb236aed5704692c32db0d5d089b69696e30
- languageName: node
- linkType: hard
-
-"locate-path@npm:^7.1.0":
- version: 7.2.0
- resolution: "locate-path@npm:7.2.0"
- dependencies:
- p-locate: ^6.0.0
- checksum: c1b653bdf29beaecb3d307dfb7c44d98a2a98a02ebe353c9ad055d1ac45d6ed4e1142563d222df9b9efebc2bcb7d4c792b507fad9e7150a04c29530b7db570f8
- languageName: node
- linkType: hard
-
-"lodash@npm:^4.17.21":
- version: 4.17.21
- resolution: "lodash@npm:4.17.21"
- checksum: eb835a2e51d381e561e508ce932ea50a8e5a68f4ebdd771ea240d3048244a8d13658acbd502cd4829768c56f2e16bdd4340b9ea141297d472517b83868e677f7
- languageName: node
- linkType: hard
-
-"longest-streak@npm:^3.0.0":
- version: 3.1.0
- resolution: "longest-streak@npm:3.1.0"
- checksum: d7f952ed004cbdb5c8bcfc4f7f5c3d65449e6c5a9e9be4505a656e3df5a57ee125f284286b4bf8ecea0c21a7b3bf2b8f9001ad506c319b9815ad6a63a47d0fd0
- languageName: node
- linkType: hard
-
-"lru-cache@npm:^10.0.1, lru-cache@npm:^9.1.1 || ^10.0.0":
- version: 10.2.0
- resolution: "lru-cache@npm:10.2.0"
- checksum: eee7ddda4a7475deac51ac81d7dd78709095c6fa46e8350dc2d22462559a1faa3b81ed931d5464b13d48cbd7e08b46100b6f768c76833912bc444b99c37e25db
- languageName: node
- linkType: hard
-
-"lru-cache@npm:^5.1.1":
- version: 5.1.1
- resolution: "lru-cache@npm:5.1.1"
- dependencies:
- yallist: ^3.0.2
- checksum: c154ae1cbb0c2206d1501a0e94df349653c92c8cbb25236d7e85190bcaf4567a03ac6eb43166fabfa36fd35623694da7233e88d9601fbf411a9a481d85dbd2cb
- languageName: node
- linkType: hard
-
-"lru-cache@npm:^6.0.0":
- version: 6.0.0
- resolution: "lru-cache@npm:6.0.0"
- dependencies:
- yallist: ^4.0.0
- checksum: f97f499f898f23e4585742138a22f22526254fdba6d75d41a1c2526b3b6cc5747ef59c5612ba7375f42aca4f8461950e925ba08c991ead0651b4918b7c978297
- languageName: node
- linkType: hard
-
-"lru-cache@npm:^7.7.1":
- version: 7.18.3
- resolution: "lru-cache@npm:7.18.3"
- checksum: e550d772384709deea3f141af34b6d4fa392e2e418c1498c078de0ee63670f1f46f5eee746e8ef7e69e1c895af0d4224e62ee33e66a543a14763b0f2e74c1356
- languageName: node
- linkType: hard
-
-"magic-string@npm:^0.30.5":
- version: 0.30.5
- resolution: "magic-string@npm:0.30.5"
- dependencies:
- "@jridgewell/sourcemap-codec": ^1.4.15
- checksum: da10fecff0c0a7d3faf756913ce62bd6d5e7b0402be48c3b27bfd651b90e29677e279069a63b764bcdc1b8ecdcdb898f29a5c5ec510f2323e8d62ee057a6eb18
- languageName: node
- linkType: hard
-
-"make-dir@npm:^4.0.0":
- version: 4.0.0
- resolution: "make-dir@npm:4.0.0"
- dependencies:
- semver: ^7.5.3
- checksum: bf0731a2dd3aab4db6f3de1585cea0b746bb73eb5a02e3d8d72757e376e64e6ada190b1eddcde5b2f24a81b688a9897efd5018737d05e02e2a671dda9cff8a8a
- languageName: node
- linkType: hard
-
-"make-fetch-happen@npm:^10.0.3":
- version: 10.2.1
- resolution: "make-fetch-happen@npm:10.2.1"
- dependencies:
- agentkeepalive: ^4.2.1
- cacache: ^16.1.0
- http-cache-semantics: ^4.1.0
- http-proxy-agent: ^5.0.0
- https-proxy-agent: ^5.0.0
- is-lambda: ^1.0.1
- lru-cache: ^7.7.1
- minipass: ^3.1.6
- minipass-collect: ^1.0.2
- minipass-fetch: ^2.0.3
- minipass-flush: ^1.0.5
- minipass-pipeline: ^1.2.4
- negotiator: ^0.6.3
- promise-retry: ^2.0.1
- socks-proxy-agent: ^7.0.0
- ssri: ^9.0.0
- checksum: 2332eb9a8ec96f1ffeeea56ccefabcb4193693597b132cd110734d50f2928842e22b84cfa1508e921b8385cdfd06dda9ad68645fed62b50fff629a580f5fb72c
- languageName: node
- linkType: hard
-
-"make-fetch-happen@npm:^13.0.0":
- version: 13.0.0
- resolution: "make-fetch-happen@npm:13.0.0"
- dependencies:
- "@npmcli/agent": ^2.0.0
- cacache: ^18.0.0
- http-cache-semantics: ^4.1.1
- is-lambda: ^1.0.1
- minipass: ^7.0.2
- minipass-fetch: ^3.0.0
- minipass-flush: ^1.0.5
- minipass-pipeline: ^1.2.4
- negotiator: ^0.6.3
- promise-retry: ^2.0.1
- ssri: ^10.0.0
- checksum: 7c7a6d381ce919dd83af398b66459a10e2fe8f4504f340d1d090d3fa3d1b0c93750220e1d898114c64467223504bd258612ba83efbc16f31b075cd56de24b4af
- languageName: node
- linkType: hard
-
-"makeerror@npm:1.0.12":
- version: 1.0.12
- resolution: "makeerror@npm:1.0.12"
- dependencies:
- tmpl: 1.0.5
- checksum: b38a025a12c8146d6eeea5a7f2bf27d51d8ad6064da8ca9405fcf7bf9b54acd43e3b30ddd7abb9b1bfa4ddb266019133313482570ddb207de568f71ecfcf6060
- languageName: node
- linkType: hard
-
-"map-cache@npm:^0.2.0":
- version: 0.2.2
- resolution: "map-cache@npm:0.2.2"
- checksum: 3067cea54285c43848bb4539f978a15dedc63c03022abeec6ef05c8cb6829f920f13b94bcaf04142fc6a088318e564c4785704072910d120d55dbc2e0c421969
- languageName: node
- linkType: hard
-
-"markdown-table@npm:^3.0.0":
- version: 3.0.3
- resolution: "markdown-table@npm:3.0.3"
- checksum: 8fcd3d9018311120fbb97115987f8b1665a603f3134c93fbecc5d1463380c8036f789e2a62c19432058829e594fff8db9ff81c88f83690b2f8ed6c074f8d9e10
- languageName: node
- linkType: hard
-
-"md5-file@npm:^5.0.0":
- version: 5.0.0
- resolution: "md5-file@npm:5.0.0"
- bin:
- md5-file: cli.js
- checksum: c606a00ff58adf5428e8e2f36d86e5d3c7029f9688126faca302cd83b5e92cac183a62e1d1f05fae7c2614e80f993326fd0a8d6a3a913c41ec7ea0eefc25aa76
- languageName: node
- linkType: hard
-
-"mdast-util-definitions@npm:^5.0.0":
- version: 5.1.2
- resolution: "mdast-util-definitions@npm:5.1.2"
- dependencies:
- "@types/mdast": ^3.0.0
- "@types/unist": ^2.0.0
- unist-util-visit: ^4.0.0
- checksum: 2544daccab744ea1ede76045c2577ae4f1cc1b9eb1ea51ab273fe1dca8db5a8d6f50f87759c0ce6484975914b144b7f40316f805cb9c86223a78db8de0b77bae
- languageName: node
- linkType: hard
-
-"mdast-util-find-and-replace@npm:^2.0.0, mdast-util-find-and-replace@npm:^2.2.1":
- version: 2.2.2
- resolution: "mdast-util-find-and-replace@npm:2.2.2"
- dependencies:
- "@types/mdast": ^3.0.0
- escape-string-regexp: ^5.0.0
- unist-util-is: ^5.0.0
- unist-util-visit-parents: ^5.0.0
- checksum: b4ce463c43fe6e1c38a53a89703f755c84ab5437f49bff9a0ac751279733332ca11c85ed0262aa6c17481f77b555d26ca6d64e70d6814f5b8d12d34a3e53a60b
- languageName: node
- linkType: hard
-
-"mdast-util-from-markdown@npm:^1.0.0":
- version: 1.3.1
- resolution: "mdast-util-from-markdown@npm:1.3.1"
- dependencies:
- "@types/mdast": ^3.0.0
- "@types/unist": ^2.0.0
- decode-named-character-reference: ^1.0.0
- mdast-util-to-string: ^3.1.0
- micromark: ^3.0.0
- micromark-util-decode-numeric-character-reference: ^1.0.0
- micromark-util-decode-string: ^1.0.0
- micromark-util-normalize-identifier: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- unist-util-stringify-position: ^3.0.0
- uvu: ^0.5.0
- checksum: c2fac225167e248d394332a4ea39596e04cbde07d8cdb3889e91e48972c4c3462a02b39fda3855345d90231eb17a90ac6e082fb4f012a77c1d0ddfb9c7446940
- languageName: node
- linkType: hard
-
-"mdast-util-gfm-autolink-literal@npm:^1.0.0":
- version: 1.0.3
- resolution: "mdast-util-gfm-autolink-literal@npm:1.0.3"
- dependencies:
- "@types/mdast": ^3.0.0
- ccount: ^2.0.0
- mdast-util-find-and-replace: ^2.0.0
- micromark-util-character: ^1.0.0
- checksum: 1748a8727cfc533bac0c287d6e72d571d165bfa77ae0418be4828177a3ec73c02c3f2ee534d87eb75cbaffa00c0866853bbcc60ae2255babb8210f7636ec2ce2
- languageName: node
- linkType: hard
-
-"mdast-util-gfm-footnote@npm:^1.0.0":
- version: 1.0.2
- resolution: "mdast-util-gfm-footnote@npm:1.0.2"
- dependencies:
- "@types/mdast": ^3.0.0
- mdast-util-to-markdown: ^1.3.0
- micromark-util-normalize-identifier: ^1.0.0
- checksum: 2d77505f9377ed7e14472ef5e6b8366c3fec2cf5f936bb36f9fbe5b97ccb7cce0464d9313c236fa86fb844206fd585db05707e4fcfb755e4fc1864194845f1f6
- languageName: node
- linkType: hard
-
-"mdast-util-gfm-strikethrough@npm:^1.0.0":
- version: 1.0.3
- resolution: "mdast-util-gfm-strikethrough@npm:1.0.3"
- dependencies:
- "@types/mdast": ^3.0.0
- mdast-util-to-markdown: ^1.3.0
- checksum: 17003340ff1bba643ec4a59fd4370fc6a32885cab2d9750a508afa7225ea71449fb05acaef60faa89c6378b8bcfbd86a9d94b05f3c6651ff27a60e3ddefc2549
- languageName: node
- linkType: hard
-
-"mdast-util-gfm-table@npm:^1.0.0":
- version: 1.0.7
- resolution: "mdast-util-gfm-table@npm:1.0.7"
- dependencies:
- "@types/mdast": ^3.0.0
- markdown-table: ^3.0.0
- mdast-util-from-markdown: ^1.0.0
- mdast-util-to-markdown: ^1.3.0
- checksum: 8b8c401bb4162e53f072a2dff8efbca880fd78d55af30601c791315ab6722cb2918176e8585792469a0c530cebb9df9b4e7fede75fdc4d83df2839e238836692
- languageName: node
- linkType: hard
-
-"mdast-util-gfm-task-list-item@npm:^1.0.0":
- version: 1.0.2
- resolution: "mdast-util-gfm-task-list-item@npm:1.0.2"
- dependencies:
- "@types/mdast": ^3.0.0
- mdast-util-to-markdown: ^1.3.0
- checksum: c9b86037d6953b84f11fb2fc3aa23d5b8e14ca0dfcb0eb2fb289200e172bb9d5647bfceb4f86606dc6d935e8d58f6a458c04d3e55e87ff8513c7d4ade976200b
- languageName: node
- linkType: hard
-
-"mdast-util-gfm@npm:^2.0.0":
- version: 2.0.2
- resolution: "mdast-util-gfm@npm:2.0.2"
- dependencies:
- mdast-util-from-markdown: ^1.0.0
- mdast-util-gfm-autolink-literal: ^1.0.0
- mdast-util-gfm-footnote: ^1.0.0
- mdast-util-gfm-strikethrough: ^1.0.0
- mdast-util-gfm-table: ^1.0.0
- mdast-util-gfm-task-list-item: ^1.0.0
- mdast-util-to-markdown: ^1.0.0
- checksum: 7078cb985255208bcbce94a121906417d38353c6b1a9acbe56ee8888010d3500608b5d51c16b0999ac63ca58848fb13012d55f26930ff6c6f3450f053d56514e
- languageName: node
- linkType: hard
-
-"mdast-util-inject@npm:^1.1.0":
- version: 1.1.0
- resolution: "mdast-util-inject@npm:1.1.0"
- dependencies:
- mdast-util-to-string: ^1.0.0
- checksum: f6539ed04bc88a827e7cc0e29b1355d463d9e9e53e3a386751c39d6129807db7682054a2973a57273c303e7dc5ab0e7c1b13e6310cfea271a3f2055e6a64fc7b
- languageName: node
- linkType: hard
-
-"mdast-util-phrasing@npm:^3.0.0":
- version: 3.0.1
- resolution: "mdast-util-phrasing@npm:3.0.1"
- dependencies:
- "@types/mdast": ^3.0.0
- unist-util-is: ^5.0.0
- checksum: c5b616d9b1eb76a6b351d195d94318494722525a12a89d9c8a3b091af7db3dd1fc55d294f9d29266d8159a8267b0df4a7a133bda8a3909d5331c383e1e1ff328
- languageName: node
- linkType: hard
-
-"mdast-util-to-hast@npm:^12.0.0":
- version: 12.3.0
- resolution: "mdast-util-to-hast@npm:12.3.0"
- dependencies:
- "@types/hast": ^2.0.0
- "@types/mdast": ^3.0.0
- mdast-util-definitions: ^5.0.0
- micromark-util-sanitize-uri: ^1.1.0
- trim-lines: ^3.0.0
- unist-util-generated: ^2.0.0
- unist-util-position: ^4.0.0
- unist-util-visit: ^4.0.0
- checksum: ea40c9f07dd0b731754434e81c913590c611b1fd753fa02550a1492aadfc30fb3adecaf62345ebb03cea2ddd250c15ab6e578fffde69c19955c9b87b10f2a9bb
- languageName: node
- linkType: hard
-
-"mdast-util-to-markdown@npm:^1.0.0, mdast-util-to-markdown@npm:^1.3.0":
- version: 1.5.0
- resolution: "mdast-util-to-markdown@npm:1.5.0"
- dependencies:
- "@types/mdast": ^3.0.0
- "@types/unist": ^2.0.0
- longest-streak: ^3.0.0
- mdast-util-phrasing: ^3.0.0
- mdast-util-to-string: ^3.0.0
- micromark-util-decode-string: ^1.0.0
- unist-util-visit: ^4.0.0
- zwitch: ^2.0.0
- checksum: 64338eb33e49bb0aea417591fd986f72fdd39205052563bb7ce9eb9ecc160824509bfacd740086a05af355c6d5c36353aafe95cab9e6927d674478757cee6259
- languageName: node
- linkType: hard
-
-"mdast-util-to-string@npm:^1.0.0":
- version: 1.1.0
- resolution: "mdast-util-to-string@npm:1.1.0"
- checksum: eec1eb283f3341376c8398b67ce512a11ab3e3191e3dbd5644d32a26784eac8d5f6d0b0fb81193af00d75a2c545cde765c8b03e966bd890076efb5d357fb4fe2
- languageName: node
- linkType: hard
-
-"mdast-util-to-string@npm:^3.0.0, mdast-util-to-string@npm:^3.1.0":
- version: 3.2.0
- resolution: "mdast-util-to-string@npm:3.2.0"
- dependencies:
- "@types/mdast": ^3.0.0
- checksum: dc40b544d54339878ae2c9f2b3198c029e1e07291d2126bd00ca28272ee6616d0d2194eb1c9828a7c34d412a79a7e73b26512a734698d891c710a1e73db1e848
- languageName: node
- linkType: hard
-
-"mdast-util-toc@npm:^6.0.0":
- version: 6.1.1
- resolution: "mdast-util-toc@npm:6.1.1"
- dependencies:
- "@types/extend": ^3.0.0
- "@types/mdast": ^3.0.0
- extend: ^3.0.0
- github-slugger: ^2.0.0
- mdast-util-to-string: ^3.1.0
- unist-util-is: ^5.0.0
- unist-util-visit: ^4.0.0
- checksum: 4a50455729c139b1ac648be8edfdaed581c9ed51697bccfbd1f1a70f16dbb4b7014da703135bc49551d116f82517483fd032ed1c5f27741f5c05d78925fe68e6
- languageName: node
- linkType: hard
-
-"merge-stream@npm:^2.0.0":
- version: 2.0.0
- resolution: "merge-stream@npm:2.0.0"
- checksum: 6fa4dcc8d86629705cea944a4b88ef4cb0e07656ebf223fa287443256414283dd25d91c1cd84c77987f2aec5927af1a9db6085757cb43d90eb170ebf4b47f4f4
- languageName: node
- linkType: hard
-
-"micromark-core-commonmark@npm:^1.0.0, micromark-core-commonmark@npm:^1.0.1":
- version: 1.1.0
- resolution: "micromark-core-commonmark@npm:1.1.0"
- dependencies:
- decode-named-character-reference: ^1.0.0
- micromark-factory-destination: ^1.0.0
- micromark-factory-label: ^1.0.0
- micromark-factory-space: ^1.0.0
- micromark-factory-title: ^1.0.0
- micromark-factory-whitespace: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-chunked: ^1.0.0
- micromark-util-classify-character: ^1.0.0
- micromark-util-html-tag-name: ^1.0.0
- micromark-util-normalize-identifier: ^1.0.0
- micromark-util-resolve-all: ^1.0.0
- micromark-util-subtokenize: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.1
- uvu: ^0.5.0
- checksum: c6dfedc95889cc73411cb222fc2330b9eda6d849c09c9fd9eb3cd3398af246167e9d3cdb0ae3ce9ae59dd34a14624c8330e380255d41279ad7350cf6c6be6c5b
- languageName: node
- linkType: hard
-
-"micromark-extension-gfm-autolink-literal@npm:^1.0.0":
- version: 1.0.5
- resolution: "micromark-extension-gfm-autolink-literal@npm:1.0.5"
- dependencies:
- micromark-util-character: ^1.0.0
- micromark-util-sanitize-uri: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: ec2f6bc4a3eb238c1b8be9744454ffbc2957e3d8a248697af5a26bb21479862300c0e40e0a92baf17c299ddf70d4bc4470d4eee112cd92322f87d81e45c2e83d
- languageName: node
- linkType: hard
-
-"micromark-extension-gfm-footnote@npm:^1.0.0":
- version: 1.1.2
- resolution: "micromark-extension-gfm-footnote@npm:1.1.2"
- dependencies:
- micromark-core-commonmark: ^1.0.0
- micromark-factory-space: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-normalize-identifier: ^1.0.0
- micromark-util-sanitize-uri: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- uvu: ^0.5.0
- checksum: c151a629ee1cd92363c018a50f926a002c944ac481ca72b3720b9529e9c20f1cbef98b0fefdcd2d594af37d0d9743673409cac488af0d2b194210fd16375dcb7
- languageName: node
- linkType: hard
-
-"micromark-extension-gfm-strikethrough@npm:^1.0.0":
- version: 1.0.7
- resolution: "micromark-extension-gfm-strikethrough@npm:1.0.7"
- dependencies:
- micromark-util-chunked: ^1.0.0
- micromark-util-classify-character: ^1.0.0
- micromark-util-resolve-all: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- uvu: ^0.5.0
- checksum: 169e310a4408feade0df80180f60d48c5cc5b7070e5e75e0bbd914e9100273508162c4bb20b72d53081dc37f1ff5834b3afa137862576f763878552c03389811
- languageName: node
- linkType: hard
-
-"micromark-extension-gfm-table@npm:^1.0.0":
- version: 1.0.7
- resolution: "micromark-extension-gfm-table@npm:1.0.7"
- dependencies:
- micromark-factory-space: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- uvu: ^0.5.0
- checksum: 4853731285224e409d7e2c94c6ec849165093bff819e701221701aa7b7b34c17702c44f2f831e96b49dc27bb07e445b02b025561b68e62f5c3254415197e7af6
- languageName: node
- linkType: hard
-
-"micromark-extension-gfm-tagfilter@npm:^1.0.0":
- version: 1.0.2
- resolution: "micromark-extension-gfm-tagfilter@npm:1.0.2"
- dependencies:
- micromark-util-types: ^1.0.0
- checksum: 7d2441df51f890c86f8e7cf7d331a570b69c8105fa1c2fc5b737cb739502c16c8ee01cf35550a8a78f89497c5dfacc97cf82d55de6274e8320f3aec25e2b0dd2
- languageName: node
- linkType: hard
-
-"micromark-extension-gfm-task-list-item@npm:^1.0.0":
- version: 1.0.5
- resolution: "micromark-extension-gfm-task-list-item@npm:1.0.5"
- dependencies:
- micromark-factory-space: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- uvu: ^0.5.0
- checksum: 929f05343d272cffb8008899289f4cffe986ef98fc622ebbd1aa4ff11470e6b32ed3e1f18cd294adb69cabb961a400650078f6c12b322cc515b82b5068b31960
- languageName: node
- linkType: hard
-
-"micromark-extension-gfm@npm:^2.0.0":
- version: 2.0.3
- resolution: "micromark-extension-gfm@npm:2.0.3"
- dependencies:
- micromark-extension-gfm-autolink-literal: ^1.0.0
- micromark-extension-gfm-footnote: ^1.0.0
- micromark-extension-gfm-strikethrough: ^1.0.0
- micromark-extension-gfm-table: ^1.0.0
- micromark-extension-gfm-tagfilter: ^1.0.0
- micromark-extension-gfm-task-list-item: ^1.0.0
- micromark-util-combine-extensions: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: c4a917c16d7aa5d00d1767b5ce5f3b1a78c0de11dbd5c8f69d2545083568aa6bb13bd9d8e4c7fec5f4da10e7ed8344b15acffc843b33a615c17396a118bc2bc1
- languageName: node
- linkType: hard
-
-"micromark-factory-destination@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-factory-destination@npm:1.1.0"
- dependencies:
- micromark-util-character: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: 9e2b5fb5fedbf622b687e20d51eb3d56ae90c0e7ecc19b37bd5285ec392c1e56f6e21aa7cfcb3c01eda88df88fe528f3acb91a5f57d7f4cba310bc3cd7f824fa
- languageName: node
- linkType: hard
-
-"micromark-factory-label@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-factory-label@npm:1.1.0"
- dependencies:
- micromark-util-character: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- uvu: ^0.5.0
- checksum: fcda48f1287d9b148c562c627418a2ab759cdeae9c8e017910a0cba94bb759a96611e1fc6df33182e97d28fbf191475237298983bb89ef07d5b02464b1ad28d5
- languageName: node
- linkType: hard
-
-"micromark-factory-space@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-factory-space@npm:1.1.0"
- dependencies:
- micromark-util-character: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: b58435076b998a7e244259a4694eb83c78915581206b6e7fc07b34c6abd36a1726ade63df8972fbf6c8fa38eecb9074f4e17be8d53f942e3b3d23d1a0ecaa941
- languageName: node
- linkType: hard
-
-"micromark-factory-title@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-factory-title@npm:1.1.0"
- dependencies:
- micromark-factory-space: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: 4432d3dbc828c81f483c5901b0c6591a85d65a9e33f7d96ba7c3ae821617a0b3237ff5faf53a9152d00aaf9afb3a9f185b205590f40ed754f1d9232e0e9157b1
- languageName: node
- linkType: hard
-
-"micromark-factory-whitespace@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-factory-whitespace@npm:1.1.0"
- dependencies:
- micromark-factory-space: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: ef0fa682c7d593d85a514ee329809dee27d10bc2a2b65217d8ef81173e33b8e83c549049764b1ad851adfe0a204dec5450d9d20a4ca8598f6c94533a73f73fcd
- languageName: node
- linkType: hard
-
-"micromark-util-character@npm:^1.0.0, micromark-util-character@npm:^1.1.0":
- version: 1.2.0
- resolution: "micromark-util-character@npm:1.2.0"
- dependencies:
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: 089e79162a19b4a28731736246579ab7e9482ac93cd681c2bfca9983dcff659212ef158a66a5957e9d4b1dba957d1b87b565d85418a5b009f0294f1f07f2aaac
- languageName: node
- linkType: hard
-
-"micromark-util-chunked@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-chunked@npm:1.1.0"
- dependencies:
- micromark-util-symbol: ^1.0.0
- checksum: c435bde9110cb595e3c61b7f54c2dc28ee03e6a57fa0fc1e67e498ad8bac61ee5a7457a2b6a73022ddc585676ede4b912d28dcf57eb3bd6951e54015e14dc20b
- languageName: node
- linkType: hard
-
-"micromark-util-classify-character@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-classify-character@npm:1.1.0"
- dependencies:
- micromark-util-character: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: 8499cb0bb1f7fb946f5896285fcca65cd742f66cd3e79ba7744792bd413ec46834f932a286de650349914d02e822946df3b55d03e6a8e1d245d1ddbd5102e5b0
- languageName: node
- linkType: hard
-
-"micromark-util-combine-extensions@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-combine-extensions@npm:1.1.0"
- dependencies:
- micromark-util-chunked: ^1.0.0
- micromark-util-types: ^1.0.0
- checksum: ee78464f5d4b61ccb437850cd2d7da4d690b260bca4ca7a79c4bb70291b84f83988159e373b167181b6716cb197e309bc6e6c96a68cc3ba9d50c13652774aba9
- languageName: node
- linkType: hard
-
-"micromark-util-decode-numeric-character-reference@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-decode-numeric-character-reference@npm:1.1.0"
- dependencies:
- micromark-util-symbol: ^1.0.0
- checksum: 4733fe75146e37611243f055fc6847137b66f0cde74d080e33bd26d0408c1d6f44cabc984063eee5968b133cb46855e729d555b9ff8d744652262b7b51feec73
- languageName: node
- linkType: hard
-
-"micromark-util-decode-string@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-decode-string@npm:1.1.0"
- dependencies:
- decode-named-character-reference: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-decode-numeric-character-reference: ^1.0.0
- micromark-util-symbol: ^1.0.0
- checksum: f1625155db452f15aa472918499689ba086b9c49d1322a08b22bfbcabe918c61b230a3002c8bc3ea9b1f52ca7a9bb1c3dd43ccb548c7f5f8b16c24a1ae77a813
- languageName: node
- linkType: hard
-
-"micromark-util-encode@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-encode@npm:1.1.0"
- checksum: 4ef29d02b12336918cea6782fa87c8c578c67463925221d4e42183a706bde07f4b8b5f9a5e1c7ce8c73bb5a98b261acd3238fecd152e6dd1cdfa2d1ae11b60a0
- languageName: node
- linkType: hard
-
-"micromark-util-html-tag-name@npm:^1.0.0":
- version: 1.2.0
- resolution: "micromark-util-html-tag-name@npm:1.2.0"
- checksum: ccf0fa99b5c58676dc5192c74665a3bfd1b536fafaf94723bd7f31f96979d589992df6fcf2862eba290ef18e6a8efb30ec8e1e910d9f3fc74f208871e9f84750
- languageName: node
- linkType: hard
-
-"micromark-util-normalize-identifier@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-normalize-identifier@npm:1.1.0"
- dependencies:
- micromark-util-symbol: ^1.0.0
- checksum: 8655bea41ffa4333e03fc22462cb42d631bbef9c3c07b625fd852b7eb442a110f9d2e5902a42e65188d85498279569502bf92f3434a1180fc06f7c37edfbaee2
- languageName: node
- linkType: hard
-
-"micromark-util-resolve-all@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-resolve-all@npm:1.1.0"
- dependencies:
- micromark-util-types: ^1.0.0
- checksum: 1ce6c0237cd3ca061e76fae6602cf95014e764a91be1b9f10d36cb0f21ca88f9a07de8d49ab8101efd0b140a4fbfda6a1efb72027ab3f4d5b54c9543271dc52c
- languageName: node
- linkType: hard
-
-"micromark-util-sanitize-uri@npm:^1.0.0, micromark-util-sanitize-uri@npm:^1.1.0":
- version: 1.2.0
- resolution: "micromark-util-sanitize-uri@npm:1.2.0"
- dependencies:
- micromark-util-character: ^1.0.0
- micromark-util-encode: ^1.0.0
- micromark-util-symbol: ^1.0.0
- checksum: 6663f365c4fe3961d622a580f4a61e34867450697f6806f027f21cf63c92989494895fcebe2345d52e249fe58a35be56e223a9776d084c9287818b40c779acc1
- languageName: node
- linkType: hard
-
-"micromark-util-subtokenize@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-subtokenize@npm:1.1.0"
- dependencies:
- micromark-util-chunked: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.0
- uvu: ^0.5.0
- checksum: 4a9d780c4d62910e196ea4fd886dc4079d8e424e5d625c0820016da0ed399a281daff39c50f9288045cc4bcd90ab47647e5396aba500f0853105d70dc8b1fc45
- languageName: node
- linkType: hard
-
-"micromark-util-symbol@npm:^1.0.0":
- version: 1.1.0
- resolution: "micromark-util-symbol@npm:1.1.0"
- checksum: 02414a753b79f67ff3276b517eeac87913aea6c028f3e668a19ea0fc09d98aea9f93d6222a76ca783d20299af9e4b8e7c797fe516b766185dcc6e93290f11f88
- languageName: node
- linkType: hard
-
-"micromark-util-types@npm:^1.0.0, micromark-util-types@npm:^1.0.1":
- version: 1.1.0
- resolution: "micromark-util-types@npm:1.1.0"
- checksum: b0ef2b4b9589f15aec2666690477a6a185536927ceb7aa55a0f46475852e012d75a1ab945187e5c7841969a842892164b15d58ff8316b8e0d6cc920cabd5ede7
- languageName: node
- linkType: hard
-
-"micromark@npm:^3.0.0":
- version: 3.2.0
- resolution: "micromark@npm:3.2.0"
- dependencies:
- "@types/debug": ^4.0.0
- debug: ^4.0.0
- decode-named-character-reference: ^1.0.0
- micromark-core-commonmark: ^1.0.1
- micromark-factory-space: ^1.0.0
- micromark-util-character: ^1.0.0
- micromark-util-chunked: ^1.0.0
- micromark-util-combine-extensions: ^1.0.0
- micromark-util-decode-numeric-character-reference: ^1.0.0
- micromark-util-encode: ^1.0.0
- micromark-util-normalize-identifier: ^1.0.0
- micromark-util-resolve-all: ^1.0.0
- micromark-util-sanitize-uri: ^1.0.0
- micromark-util-subtokenize: ^1.0.0
- micromark-util-symbol: ^1.0.0
- micromark-util-types: ^1.0.1
- uvu: ^0.5.0
- checksum: 56c15851ad3eb8301aede65603473443e50c92a54849cac1dadd57e4ec33ab03a0a77f3df03de47133e6e8f695dae83b759b514586193269e98c0bf319ecd5e4
- languageName: node
- linkType: hard
-
-"micromatch@npm:^4.0.4":
- version: 4.0.5
- resolution: "micromatch@npm:4.0.5"
- dependencies:
- braces: ^3.0.2
- picomatch: ^2.3.1
- checksum: 02a17b671c06e8fefeeb6ef996119c1e597c942e632a21ef589154f23898c9c6a9858526246abb14f8bca6e77734aa9dcf65476fca47cedfb80d9577d52843fc
- languageName: node
- linkType: hard
-
-"mimic-fn@npm:^2.1.0":
- version: 2.1.0
- resolution: "mimic-fn@npm:2.1.0"
- checksum: d2421a3444848ce7f84bd49115ddacff29c15745db73f54041edc906c14b131a38d05298dae3081667627a59b2eb1ca4b436ff2e1b80f69679522410418b478a
- languageName: node
- linkType: hard
-
-"minimatch@npm:^3.0.4, minimatch@npm:^3.1.1":
- version: 3.1.2
- resolution: "minimatch@npm:3.1.2"
- dependencies:
- brace-expansion: ^1.1.7
- checksum: c154e566406683e7bcb746e000b84d74465b3a832c45d59912b9b55cd50dee66e5c4b1e5566dba26154040e51672f9aa450a9aef0c97cfc7336b78b7afb9540a
- languageName: node
- linkType: hard
-
-"minimatch@npm:^5.0.1":
- version: 5.1.6
- resolution: "minimatch@npm:5.1.6"
- dependencies:
- brace-expansion: ^2.0.1
- checksum: 7564208ef81d7065a370f788d337cd80a689e981042cb9a1d0e6580b6c6a8c9279eba80010516e258835a988363f99f54a6f711a315089b8b42694f5da9d0d77
- languageName: node
- linkType: hard
-
-"minimatch@npm:^9.0.1":
- version: 9.0.3
- resolution: "minimatch@npm:9.0.3"
- dependencies:
- brace-expansion: ^2.0.1
- checksum: 253487976bf485b612f16bf57463520a14f512662e592e95c571afdab1442a6a6864b6c88f248ce6fc4ff0b6de04ac7aa6c8bb51e868e99d1d65eb0658a708b5
- languageName: node
- linkType: hard
-
-"minimist@npm:^1.2.5":
- version: 1.2.8
- resolution: "minimist@npm:1.2.8"
- checksum: 75a6d645fb122dad29c06a7597bddea977258957ed88d7a6df59b5cd3fe4a527e253e9bbf2e783e4b73657f9098b96a5fe96ab8a113655d4109108577ecf85b0
- languageName: node
- linkType: hard
-
-"minipass-collect@npm:^1.0.2":
- version: 1.0.2
- resolution: "minipass-collect@npm:1.0.2"
- dependencies:
- minipass: ^3.0.0
- checksum: 14df761028f3e47293aee72888f2657695ec66bd7d09cae7ad558da30415fdc4752bbfee66287dcc6fd5e6a2fa3466d6c484dc1cbd986525d9393b9523d97f10
- languageName: node
- linkType: hard
-
-"minipass-collect@npm:^2.0.1":
- version: 2.0.1
- resolution: "minipass-collect@npm:2.0.1"
- dependencies:
- minipass: ^7.0.3
- checksum: b251bceea62090f67a6cced7a446a36f4cd61ee2d5cea9aee7fff79ba8030e416327a1c5aa2908dc22629d06214b46d88fdab8c51ac76bacbf5703851b5ad342
- languageName: node
- linkType: hard
-
-"minipass-fetch@npm:^2.0.3":
- version: 2.1.2
- resolution: "minipass-fetch@npm:2.1.2"
- dependencies:
- encoding: ^0.1.13
- minipass: ^3.1.6
- minipass-sized: ^1.0.3
- minizlib: ^2.1.2
- dependenciesMeta:
- encoding:
- optional: true
- checksum: 3f216be79164e915fc91210cea1850e488793c740534985da017a4cbc7a5ff50506956d0f73bb0cb60e4fe91be08b6b61ef35101706d3ef5da2c8709b5f08f91
- languageName: node
- linkType: hard
-
-"minipass-fetch@npm:^3.0.0":
- version: 3.0.4
- resolution: "minipass-fetch@npm:3.0.4"
- dependencies:
- encoding: ^0.1.13
- minipass: ^7.0.3
- minipass-sized: ^1.0.3
- minizlib: ^2.1.2
- dependenciesMeta:
- encoding:
- optional: true
- checksum: af7aad15d5c128ab1ebe52e043bdf7d62c3c6f0cecb9285b40d7b395e1375b45dcdfd40e63e93d26a0e8249c9efd5c325c65575aceee192883970ff8cb11364a
- languageName: node
- linkType: hard
-
-"minipass-flush@npm:^1.0.5":
- version: 1.0.5
- resolution: "minipass-flush@npm:1.0.5"
- dependencies:
- minipass: ^3.0.0
- checksum: 56269a0b22bad756a08a94b1ffc36b7c9c5de0735a4dd1ab2b06c066d795cfd1f0ac44a0fcae13eece5589b908ecddc867f04c745c7009be0b566421ea0944cf
- languageName: node
- linkType: hard
-
-"minipass-pipeline@npm:^1.2.4":
- version: 1.2.4
- resolution: "minipass-pipeline@npm:1.2.4"
- dependencies:
- minipass: ^3.0.0
- checksum: b14240dac0d29823c3d5911c286069e36d0b81173d7bdf07a7e4a91ecdef92cdff4baaf31ea3746f1c61e0957f652e641223970870e2353593f382112257971b
- languageName: node
- linkType: hard
-
-"minipass-sized@npm:^1.0.3":
- version: 1.0.3
- resolution: "minipass-sized@npm:1.0.3"
- dependencies:
- minipass: ^3.0.0
- checksum: 79076749fcacf21b5d16dd596d32c3b6bf4d6e62abb43868fac21674078505c8b15eaca4e47ed844985a4514854f917d78f588fcd029693709417d8f98b2bd60
- languageName: node
- linkType: hard
-
-"minipass@npm:^3.0.0, minipass@npm:^3.1.1, minipass@npm:^3.1.6":
- version: 3.3.6
- resolution: "minipass@npm:3.3.6"
- dependencies:
- yallist: ^4.0.0
- checksum: a30d083c8054cee83cdcdc97f97e4641a3f58ae743970457b1489ce38ee1167b3aaf7d815cd39ec7a99b9c40397fd4f686e83750e73e652b21cb516f6d845e48
- languageName: node
- linkType: hard
-
-"minipass@npm:^5.0.0":
- version: 5.0.0
- resolution: "minipass@npm:5.0.0"
- checksum: 425dab288738853fded43da3314a0b5c035844d6f3097a8e3b5b29b328da8f3c1af6fc70618b32c29ff906284cf6406b6841376f21caaadd0793c1d5a6a620ea
- languageName: node
- linkType: hard
-
-"minipass@npm:^5.0.0 || ^6.0.2 || ^7.0.0, minipass@npm:^7.0.2, minipass@npm:^7.0.3":
- version: 7.0.4
- resolution: "minipass@npm:7.0.4"
- checksum: 87585e258b9488caf2e7acea242fd7856bbe9a2c84a7807643513a338d66f368c7d518200ad7b70a508664d408aa000517647b2930c259a8b1f9f0984f344a21
- languageName: node
- linkType: hard
-
-"minizlib@npm:^2.1.1, minizlib@npm:^2.1.2":
- version: 2.1.2
- resolution: "minizlib@npm:2.1.2"
- dependencies:
- minipass: ^3.0.0
- yallist: ^4.0.0
- checksum: f1fdeac0b07cf8f30fcf12f4b586795b97be856edea22b5e9072707be51fc95d41487faec3f265b42973a304fe3a64acd91a44a3826a963e37b37bafde0212c3
- languageName: node
- linkType: hard
-
-"mkdirp-classic@npm:^0.5.2, mkdirp-classic@npm:^0.5.3":
- version: 0.5.3
- resolution: "mkdirp-classic@npm:0.5.3"
- checksum: 3f4e088208270bbcc148d53b73e9a5bd9eef05ad2cbf3b3d0ff8795278d50dd1d11a8ef1875ff5aea3fa888931f95bfcb2ad5b7c1061cfefd6284d199e6776ac
- languageName: node
- linkType: hard
-
-"mkdirp@npm:^1.0.3, mkdirp@npm:^1.0.4":
- version: 1.0.4
- resolution: "mkdirp@npm:1.0.4"
- bin:
- mkdirp: bin/cmd.js
- checksum: a96865108c6c3b1b8e1d5e9f11843de1e077e57737602de1b82030815f311be11f96f09cce59bd5b903d0b29834733e5313f9301e3ed6d6f6fba2eae0df4298f
- languageName: node
- linkType: hard
-
-"mri@npm:^1.1.0":
- version: 1.2.0
- resolution: "mri@npm:1.2.0"
- checksum: 83f515abbcff60150873e424894a2f65d68037e5a7fcde8a9e2b285ee9c13ac581b63cfc1e6826c4732de3aeb84902f7c1e16b7aff46cd3f897a0f757a894e85
- languageName: node
- linkType: hard
-
-"ms@npm:2.1.2":
- version: 2.1.2
- resolution: "ms@npm:2.1.2"
- checksum: 673cdb2c3133eb050c745908d8ce632ed2c02d85640e2edb3ace856a2266a813b30c613569bf3354fdf4ea7d1a1494add3bfa95e2713baa27d0c2c71fc44f58f
- languageName: node
- linkType: hard
-
-"ms@npm:^2.0.0":
- version: 2.1.3
- resolution: "ms@npm:2.1.3"
- checksum: aa92de608021b242401676e35cfa5aa42dd70cbdc082b916da7fb925c542173e36bce97ea3e804923fe92c0ad991434e4a38327e15a1b5b5f945d66df615ae6d
- languageName: node
- linkType: hard
-
-"nanoid@npm:^3.3.7":
- version: 3.3.7
- resolution: "nanoid@npm:3.3.7"
- bin:
- nanoid: bin/nanoid.cjs
- checksum: d36c427e530713e4ac6567d488b489a36582ef89da1d6d4e3b87eded11eb10d7042a877958c6f104929809b2ab0bafa17652b076cdf84324aa75b30b722204f2
- languageName: node
- linkType: hard
-
-"natural-compare@npm:^1.4.0":
- version: 1.4.0
- resolution: "natural-compare@npm:1.4.0"
- checksum: 23ad088b08f898fc9b53011d7bb78ec48e79de7627e01ab5518e806033861bef68d5b0cd0e2205c2f36690ac9571ff6bcb05eb777ced2eeda8d4ac5b44592c3d
- languageName: node
- linkType: hard
-
-"negotiator@npm:^0.6.3":
- version: 0.6.3
- resolution: "negotiator@npm:0.6.3"
- checksum: b8ffeb1e262eff7968fc90a2b6767b04cfd9842582a9d0ece0af7049537266e7b2506dfb1d107a32f06dd849ab2aea834d5830f7f4d0e5cb7d36e1ae55d021d9
- languageName: node
- linkType: hard
-
-"node-abi@npm:^3.3.0":
- version: 3.52.0
- resolution: "node-abi@npm:3.52.0"
- dependencies:
- semver: ^7.3.5
- checksum: 2ef47937d058fa1f0817294fe5ac3ec1d370d3f8eb4931ea920b7e147033390058d3bc35b64d9161036ad2fda191aa1155005cea20ec50984312637221559354
- languageName: node
- linkType: hard
-
-"node-addon-api@npm:^6.1.0":
- version: 6.1.0
- resolution: "node-addon-api@npm:6.1.0"
- dependencies:
- node-gyp: latest
- checksum: 3a539510e677cfa3a833aca5397300e36141aca064cdc487554f2017110709a03a95da937e98c2a14ec3c626af7b2d1b6dabe629a481f9883143d0d5bff07bf2
- languageName: node
- linkType: hard
-
-"node-gyp-build@npm:^4.6.0":
- version: 4.7.1
- resolution: "node-gyp-build@npm:4.7.1"
- bin:
- node-gyp-build: bin.js
- node-gyp-build-optional: optional.js
- node-gyp-build-test: build-test.js
- checksum: 2ef8248021489db03be3e8098977cdc797b80a9b12b77c6dcb89b0dc89b8c62e6a482672ee298f61021740ae7f080fb33154cfec8fb158cec620f57b0fae87c0
- languageName: node
- linkType: hard
-
-"node-gyp@npm:9.x.x":
- version: 9.4.1
- resolution: "node-gyp@npm:9.4.1"
- dependencies:
- env-paths: ^2.2.0
- exponential-backoff: ^3.1.1
- glob: ^7.1.4
- graceful-fs: ^4.2.6
- make-fetch-happen: ^10.0.3
- nopt: ^6.0.0
- npmlog: ^6.0.0
- rimraf: ^3.0.2
- semver: ^7.3.5
- tar: ^6.1.2
- which: ^2.0.2
- bin:
- node-gyp: bin/node-gyp.js
- checksum: 8576c439e9e925ab50679f87b7dfa7aa6739e42822e2ad4e26c36341c0ba7163fdf5a946f0a67a476d2f24662bc40d6c97bd9e79ced4321506738e6b760a1577
- languageName: node
- linkType: hard
-
-"node-gyp@npm:latest":
- version: 10.0.1
- resolution: "node-gyp@npm:10.0.1"
- dependencies:
- env-paths: ^2.2.0
- exponential-backoff: ^3.1.1
- glob: ^10.3.10
- graceful-fs: ^4.2.6
- make-fetch-happen: ^13.0.0
- nopt: ^7.0.0
- proc-log: ^3.0.0
- semver: ^7.3.5
- tar: ^6.1.2
- which: ^4.0.0
- bin:
- node-gyp: bin/node-gyp.js
- checksum: 60a74e66d364903ce02049966303a57f898521d139860ac82744a5fdd9f7b7b3b61f75f284f3bfe6e6add3b8f1871ce305a1d41f775c7482de837b50c792223f
- languageName: node
- linkType: hard
-
-"node-int64@npm:^0.4.0":
- version: 0.4.0
- resolution: "node-int64@npm:0.4.0"
- checksum: d0b30b1ee6d961851c60d5eaa745d30b5c95d94bc0e74b81e5292f7c42a49e3af87f1eb9e89f59456f80645d679202537de751b7d72e9e40ceea40c5e449057e
- languageName: node
- linkType: hard
-
-"node-releases@npm:^2.0.14":
- version: 2.0.14
- resolution: "node-releases@npm:2.0.14"
- checksum: 59443a2f77acac854c42d321bf1b43dea0aef55cd544c6a686e9816a697300458d4e82239e2d794ea05f7bbbc8a94500332e2d3ac3f11f52e4b16cbe638b3c41
- languageName: node
- linkType: hard
-
-"nopt@npm:^6.0.0":
- version: 6.0.0
- resolution: "nopt@npm:6.0.0"
- dependencies:
- abbrev: ^1.0.0
- bin:
- nopt: bin/nopt.js
- checksum: 82149371f8be0c4b9ec2f863cc6509a7fd0fa729929c009f3a58e4eb0c9e4cae9920e8f1f8eb46e7d032fec8fb01bede7f0f41a67eb3553b7b8e14fa53de1dac
- languageName: node
- linkType: hard
-
-"nopt@npm:^7.0.0":
- version: 7.2.0
- resolution: "nopt@npm:7.2.0"
- dependencies:
- abbrev: ^2.0.0
- bin:
- nopt: bin/nopt.js
- checksum: a9c0f57fb8cb9cc82ae47192ca2b7ef00e199b9480eed202482c962d61b59a7fbe7541920b2a5839a97b42ee39e288c0aed770e38057a608d7f579389dfde410
- languageName: node
- linkType: hard
-
-"normalize-package-data@npm:^3.0.2":
- version: 3.0.3
- resolution: "normalize-package-data@npm:3.0.3"
- dependencies:
- hosted-git-info: ^4.0.1
- is-core-module: ^2.5.0
- semver: ^7.3.4
- validate-npm-package-license: ^3.0.1
- checksum: bbcee00339e7c26fdbc760f9b66d429258e2ceca41a5df41f5df06cc7652de8d82e8679ff188ca095cad8eff2b6118d7d866af2b68400f74602fbcbce39c160a
- languageName: node
- linkType: hard
-
-"normalize-path@npm:^3.0.0, normalize-path@npm:~3.0.0":
- version: 3.0.0
- resolution: "normalize-path@npm:3.0.0"
- checksum: 88eeb4da891e10b1318c4b2476b6e2ecbeb5ff97d946815ffea7794c31a89017c70d7f34b3c2ebf23ef4e9fc9fb99f7dffe36da22011b5b5c6ffa34f4873ec20
- languageName: node
- linkType: hard
-
-"npm-run-path@npm:^3.1.0":
- version: 3.1.0
- resolution: "npm-run-path@npm:3.1.0"
- dependencies:
- path-key: ^3.0.0
- checksum: 141e0b8f0e3b137347a2896572c9a84701754dda0670d3ceb8c56a87702ee03c26227e4517ab93f2904acfc836547315e740b8289bb24ca0cd8ba2b198043b0f
- languageName: node
- linkType: hard
-
-"npm-run-path@npm:^4.0.1":
- version: 4.0.1
- resolution: "npm-run-path@npm:4.0.1"
- dependencies:
- path-key: ^3.0.0
- checksum: 5374c0cea4b0bbfdfae62da7bbdf1e1558d338335f4cacf2515c282ff358ff27b2ecb91ffa5330a8b14390ac66a1e146e10700440c1ab868208430f56b5f4d23
- languageName: node
- linkType: hard
-
-"npmlog@npm:^6.0.0":
- version: 6.0.2
- resolution: "npmlog@npm:6.0.2"
- dependencies:
- are-we-there-yet: ^3.0.0
- console-control-strings: ^1.1.0
- gauge: ^4.0.3
- set-blocking: ^2.0.0
- checksum: ae238cd264a1c3f22091cdd9e2b106f684297d3c184f1146984ecbe18aaa86343953f26b9520dedd1b1372bc0316905b736c1932d778dbeb1fcf5a1001390e2a
- languageName: node
- linkType: hard
-
-"once@npm:^1.3.0, once@npm:^1.3.1, once@npm:^1.4.0":
- version: 1.4.0
- resolution: "once@npm:1.4.0"
- dependencies:
- wrappy: 1
- checksum: cd0a88501333edd640d95f0d2700fbde6bff20b3d4d9bdc521bdd31af0656b5706570d6c6afe532045a20bb8dc0849f8332d6f2a416e0ba6d3d3b98806c7db68
- languageName: node
- linkType: hard
-
-"onetime@npm:^5.1.2":
- version: 5.1.2
- resolution: "onetime@npm:5.1.2"
- dependencies:
- mimic-fn: ^2.1.0
- checksum: 2478859ef817fc5d4e9c2f9e5728512ddd1dbc9fb7829ad263765bb6d3b91ce699d6e2332eef6b7dff183c2f490bd3349f1666427eaba4469fba0ac38dfd0d34
- languageName: node
- linkType: hard
-
-"p-limit@npm:^2.2.0":
- version: 2.3.0
- resolution: "p-limit@npm:2.3.0"
- dependencies:
- p-try: ^2.0.0
- checksum: 84ff17f1a38126c3314e91ecfe56aecbf36430940e2873dadaa773ffe072dc23b7af8e46d4b6485d302a11673fe94c6b67ca2cfbb60c989848b02100d0594ac1
- languageName: node
- linkType: hard
-
-"p-limit@npm:^3.1.0":
- version: 3.1.0
- resolution: "p-limit@npm:3.1.0"
- dependencies:
- yocto-queue: ^0.1.0
- checksum: 7c3690c4dbf62ef625671e20b7bdf1cbc9534e83352a2780f165b0d3ceba21907e77ad63401708145ca4e25bfc51636588d89a8c0aeb715e6c37d1c066430360
- languageName: node
- linkType: hard
-
-"p-limit@npm:^4.0.0":
- version: 4.0.0
- resolution: "p-limit@npm:4.0.0"
- dependencies:
- yocto-queue: ^1.0.0
- checksum: 01d9d70695187788f984226e16c903475ec6a947ee7b21948d6f597bed788e3112cc7ec2e171c1d37125057a5f45f3da21d8653e04a3a793589e12e9e80e756b
- languageName: node
- linkType: hard
-
-"p-locate@npm:^4.1.0":
- version: 4.1.0
- resolution: "p-locate@npm:4.1.0"
- dependencies:
- p-limit: ^2.2.0
- checksum: 513bd14a455f5da4ebfcb819ef706c54adb09097703de6aeaa5d26fe5ea16df92b48d1ac45e01e3944ce1e6aa2a66f7f8894742b8c9d6e276e16cd2049a2b870
- languageName: node
- linkType: hard
-
-"p-locate@npm:^6.0.0":
- version: 6.0.0
- resolution: "p-locate@npm:6.0.0"
- dependencies:
- p-limit: ^4.0.0
- checksum: 2bfe5234efa5e7a4e74b30a5479a193fdd9236f8f6b4d2f3f69e3d286d9a7d7ab0c118a2a50142efcf4e41625def635bd9332d6cbf9cc65d85eb0718c579ab38
- languageName: node
- linkType: hard
-
-"p-map@npm:^4.0.0":
- version: 4.0.0
- resolution: "p-map@npm:4.0.0"
- dependencies:
- aggregate-error: ^3.0.0
- checksum: cb0ab21ec0f32ddffd31dfc250e3afa61e103ef43d957cc45497afe37513634589316de4eb88abdfd969fe6410c22c0b93ab24328833b8eb1ccc087fc0442a1c
- languageName: node
- linkType: hard
-
-"p-try@npm:^2.0.0":
- version: 2.2.0
- resolution: "p-try@npm:2.2.0"
- checksum: f8a8e9a7693659383f06aec604ad5ead237c7a261c18048a6e1b5b85a5f8a067e469aa24f5bc009b991ea3b058a87f5065ef4176793a200d4917349881216cae
- languageName: node
- linkType: hard
-
-"parse-filepath@npm:^1.0.2":
- version: 1.0.2
- resolution: "parse-filepath@npm:1.0.2"
- dependencies:
- is-absolute: ^1.0.0
- map-cache: ^0.2.0
- path-root: ^0.1.1
- checksum: 6794c3f38d3921f0f7cc63fb1fb0c4d04cd463356ad389c8ce6726d3c50793b9005971f4138975a6d7025526058d5e65e9bfe634d0765e84c4e2571152665a69
- languageName: node
- linkType: hard
-
-"parse-json@npm:^5.2.0":
- version: 5.2.0
- resolution: "parse-json@npm:5.2.0"
- dependencies:
- "@babel/code-frame": ^7.0.0
- error-ex: ^1.3.1
- json-parse-even-better-errors: ^2.3.0
- lines-and-columns: ^1.1.6
- checksum: 62085b17d64da57f40f6afc2ac1f4d95def18c4323577e1eced571db75d9ab59b297d1d10582920f84b15985cbfc6b6d450ccbf317644cfa176f3ed982ad87e2
- languageName: node
- linkType: hard
-
-"parse-path@npm:^7.0.0":
- version: 7.0.0
- resolution: "parse-path@npm:7.0.0"
- dependencies:
- protocols: ^2.0.0
- checksum: 244b46523a58181d251dda9b888efde35d8afb957436598d948852f416d8c76ddb4f2010f9fc94218b4be3e5c0f716aa0d2026194a781e3b8981924142009302
- languageName: node
- linkType: hard
-
-"parse-url@npm:^8.1.0":
- version: 8.1.0
- resolution: "parse-url@npm:8.1.0"
- dependencies:
- parse-path: ^7.0.0
- checksum: b93e21ab4c93c7d7317df23507b41be7697694d4c94f49ed5c8d6288b01cba328fcef5ba388e147948eac20453dee0df9a67ab2012415189fff85973bdffe8d9
- languageName: node
- linkType: hard
-
-"parse5@npm:^6.0.0":
- version: 6.0.1
- resolution: "parse5@npm:6.0.1"
- checksum: 7d569a176c5460897f7c8f3377eff640d54132b9be51ae8a8fa4979af940830b2b0c296ce75e5bd8f4041520aadde13170dbdec44889975f906098ea0002f4bd
- languageName: node
- linkType: hard
-
-"path-exists@npm:^4.0.0":
- version: 4.0.0
- resolution: "path-exists@npm:4.0.0"
- checksum: 505807199dfb7c50737b057dd8d351b82c033029ab94cb10a657609e00c1bc53b951cfdbccab8de04c5584d5eff31128ce6afd3db79281874a5ef2adbba55ed1
- languageName: node
- linkType: hard
-
-"path-exists@npm:^5.0.0":
- version: 5.0.0
- resolution: "path-exists@npm:5.0.0"
- checksum: 8ca842868cab09423994596eb2c5ec2a971c17d1a3cb36dbf060592c730c725cd524b9067d7d2a1e031fef9ba7bd2ac6dc5ec9fb92aa693265f7be3987045254
- languageName: node
- linkType: hard
-
-"path-is-absolute@npm:^1.0.0":
- version: 1.0.1
- resolution: "path-is-absolute@npm:1.0.1"
- checksum: 060840f92cf8effa293bcc1bea81281bd7d363731d214cbe5c227df207c34cd727430f70c6037b5159c8a870b9157cba65e775446b0ab06fd5ecc7e54615a3b8
- languageName: node
- linkType: hard
-
-"path-key@npm:^3.0.0, path-key@npm:^3.1.0":
- version: 3.1.1
- resolution: "path-key@npm:3.1.1"
- checksum: 55cd7a9dd4b343412a8386a743f9c746ef196e57c823d90ca3ab917f90ab9f13dd0ded27252ba49dbdfcab2b091d998bc446f6220cd3cea65db407502a740020
- languageName: node
- linkType: hard
-
-"path-parse@npm:^1.0.7":
- version: 1.0.7
- resolution: "path-parse@npm:1.0.7"
- checksum: 49abf3d81115642938a8700ec580da6e830dde670be21893c62f4e10bd7dd4c3742ddc603fe24f898cba7eb0c6bc1777f8d9ac14185d34540c6d4d80cd9cae8a
- languageName: node
- linkType: hard
-
-"path-root-regex@npm:^0.1.0":
- version: 0.1.2
- resolution: "path-root-regex@npm:0.1.2"
- checksum: dcd75d1f8e93faabe35a58e875b0f636839b3658ff2ad8c289463c40bc1a844debe0dab73c3398ef9dc8f6ec6c319720aff390cf4633763ddcf3cf4b1bbf7e8b
- languageName: node
- linkType: hard
-
-"path-root@npm:^0.1.1":
- version: 0.1.1
- resolution: "path-root@npm:0.1.1"
- dependencies:
- path-root-regex: ^0.1.0
- checksum: ff88aebfc1c59ace510cc06703d67692a11530989920427625e52b66a303ca9b3d4059b0b7d0b2a73248d1ad29bcb342b8b786ec00592f3101d38a45fd3b2e08
- languageName: node
- linkType: hard
-
-"path-scurry@npm:^1.10.1":
- version: 1.10.1
- resolution: "path-scurry@npm:1.10.1"
- dependencies:
- lru-cache: ^9.1.1 || ^10.0.0
- minipass: ^5.0.0 || ^6.0.2 || ^7.0.0
- checksum: e2557cff3a8fb8bc07afdd6ab163a92587884f9969b05bbbaf6fe7379348bfb09af9ed292af12ed32398b15fb443e81692047b786d1eeb6d898a51eb17ed7d90
- languageName: node
- linkType: hard
-
-"picocolors@npm:^1.0.0":
- version: 1.0.0
- resolution: "picocolors@npm:1.0.0"
- checksum: a2e8092dd86c8396bdba9f2b5481032848525b3dc295ce9b57896f931e63fc16f79805144321f72976383fc249584672a75cc18d6777c6b757603f372f745981
- languageName: node
- linkType: hard
-
-"picomatch@npm:^2.0.4, picomatch@npm:^2.2.1, picomatch@npm:^2.2.3, picomatch@npm:^2.3.1":
- version: 2.3.1
- resolution: "picomatch@npm:2.3.1"
- checksum: 050c865ce81119c4822c45d3c84f1ced46f93a0126febae20737bd05ca20589c564d6e9226977df859ed5e03dc73f02584a2b0faad36e896936238238b0446cf
- languageName: node
- linkType: hard
-
-"pify@npm:^6.0.0":
- version: 6.1.0
- resolution: "pify@npm:6.1.0"
- checksum: cb21ee8794e9e14955669fbc06b964d0dd3d4e6fa3c2ea3cf22f6794de61ec1ea5c1fac02dadfd4aa16c9cf589f6733406e826e64366f70e09f5e95917a0b8ac
- languageName: node
- linkType: hard
-
-"pirates@npm:^4.0.4":
- version: 4.0.6
- resolution: "pirates@npm:4.0.6"
- checksum: 46a65fefaf19c6f57460388a5af9ab81e3d7fd0e7bc44ca59d753cb5c4d0df97c6c6e583674869762101836d68675f027d60f841c105d72734df9dfca97cbcc6
- languageName: node
- linkType: hard
-
-"pkg-dir@npm:^4.2.0":
- version: 4.2.0
- resolution: "pkg-dir@npm:4.2.0"
- dependencies:
- find-up: ^4.0.0
- checksum: 9863e3f35132bf99ae1636d31ff1e1e3501251d480336edb1c211133c8d58906bed80f154a1d723652df1fda91e01c7442c2eeaf9dc83157c7ae89087e43c8d6
- languageName: node
- linkType: hard
-
-"postcss@npm:^8.4.32":
- version: 8.4.32
- resolution: "postcss@npm:8.4.32"
- dependencies:
- nanoid: ^3.3.7
- picocolors: ^1.0.0
- source-map-js: ^1.0.2
- checksum: 220d9d0bf5d65be7ed31006c523bfb11619461d296245c1231831f90150aeb4a31eab9983ac9c5c89759a3ca8b60b3e0d098574964e1691673c3ce5c494305ae
- languageName: node
- linkType: hard
-
-"prebuildify@npm:^5.0.1":
- version: 5.0.1
- resolution: "prebuildify@npm:5.0.1"
- dependencies:
- execspawn: ^1.0.1
- minimist: ^1.2.5
- mkdirp-classic: ^0.5.3
- node-abi: ^3.3.0
- npm-run-path: ^3.1.0
- pump: ^3.0.0
- tar-fs: ^2.1.0
- bin:
- prebuildify: bin.js
- checksum: d71a6410efe8a2819d629eff3290c57c6e125ca87004682d97caca18feedf498d492ab3af933a640ffbcb675b6b38fd4f51337e160916ab09cccef6c6719258c
- languageName: node
- linkType: hard
-
-"prettier@npm:^2.8.8":
- version: 2.8.8
- resolution: "prettier@npm:2.8.8"
- bin:
- prettier: bin-prettier.js
- checksum: b49e409431bf129dd89238d64299ba80717b57ff5a6d1c1a8b1a28b590d998a34e083fa13573bc732bb8d2305becb4c9a4407f8486c81fa7d55100eb08263cf8
- languageName: node
- linkType: hard
-
-"pretty-format@npm:^29.7.0":
- version: 29.7.0
- resolution: "pretty-format@npm:29.7.0"
- dependencies:
- "@jest/schemas": ^29.6.3
- ansi-styles: ^5.0.0
- react-is: ^18.0.0
- checksum: 032c1602383e71e9c0c02a01bbd25d6759d60e9c7cf21937dde8357aa753da348fcec5def5d1002c9678a8524d5fe099ad98861286550ef44de8808cc61e43b6
- languageName: node
- linkType: hard
-
-"proc-log@npm:^3.0.0":
- version: 3.0.0
- resolution: "proc-log@npm:3.0.0"
- checksum: 02b64e1b3919e63df06f836b98d3af002b5cd92655cab18b5746e37374bfb73e03b84fe305454614b34c25b485cc687a9eebdccf0242cda8fda2475dd2c97e02
- languageName: node
- linkType: hard
-
-"promise-inflight@npm:^1.0.1":
- version: 1.0.1
- resolution: "promise-inflight@npm:1.0.1"
- checksum: 22749483091d2c594261517f4f80e05226d4d5ecc1fc917e1886929da56e22b5718b7f2a75f3807e7a7d471bc3be2907fe92e6e8f373ddf5c64bae35b5af3981
- languageName: node
- linkType: hard
-
-"promise-retry@npm:^2.0.1":
- version: 2.0.1
- resolution: "promise-retry@npm:2.0.1"
- dependencies:
- err-code: ^2.0.2
- retry: ^0.12.0
- checksum: f96a3f6d90b92b568a26f71e966cbbc0f63ab85ea6ff6c81284dc869b41510e6cdef99b6b65f9030f0db422bf7c96652a3fff9f2e8fb4a0f069d8f4430359429
- languageName: node
- linkType: hard
-
-"prompts@npm:^2.0.1":
- version: 2.4.2
- resolution: "prompts@npm:2.4.2"
- dependencies:
- kleur: ^3.0.3
- sisteransi: ^1.0.5
- checksum: d8fd1fe63820be2412c13bfc5d0a01909acc1f0367e32396962e737cb2fc52d004f3302475d5ce7d18a1e8a79985f93ff04ee03007d091029c3f9104bffc007d
- languageName: node
- linkType: hard
-
-"property-information@npm:^6.0.0":
- version: 6.4.0
- resolution: "property-information@npm:6.4.0"
- checksum: b5aed9a40e87730995f3ceed29839f137fa73b2a4cccfb8ed72ab8bddb8881cad05c3487c4aa168d7cb49a53db8089790c9f00f59d15b8380d2bb5383cdd1f24
- languageName: node
- linkType: hard
-
-"protocols@npm:^2.0.0, protocols@npm:^2.0.1":
- version: 2.0.1
- resolution: "protocols@npm:2.0.1"
- checksum: 4a9bef6aa0449a0245ded319ac3cbfd032c3e76ebb562777037a3a832c99253d0e8bc2847f7be350236df620a11f7d4fe683ea7f59a2cc14c69f746b6259eda4
- languageName: node
- linkType: hard
-
-"pump@npm:^3.0.0":
- version: 3.0.0
- resolution: "pump@npm:3.0.0"
- dependencies:
- end-of-stream: ^1.1.0
- once: ^1.3.1
- checksum: e42e9229fba14732593a718b04cb5e1cfef8254544870997e0ecd9732b189a48e1256e4e5478148ecb47c8511dca2b09eae56b4d0aad8009e6fac8072923cfc9
- languageName: node
- linkType: hard
-
-"pure-rand@npm:^6.0.0":
- version: 6.0.4
- resolution: "pure-rand@npm:6.0.4"
- checksum: e1c4e69f8bf7303e5252756d67c3c7551385cd34d94a1f511fe099727ccbab74c898c03a06d4c4a24a89b51858781057b83ebbfe740d984240cdc04fead36068
- languageName: node
- linkType: hard
-
-"react-is@npm:^18.0.0":
- version: 18.2.0
- resolution: "react-is@npm:18.2.0"
- checksum: e72d0ba81b5922759e4aff17e0252bd29988f9642ed817f56b25a3e217e13eea8a7f2322af99a06edb779da12d5d636e9fda473d620df9a3da0df2a74141d53e
- languageName: node
- linkType: hard
-
-"read-pkg-up@npm:^9.1.0":
- version: 9.1.0
- resolution: "read-pkg-up@npm:9.1.0"
- dependencies:
- find-up: ^6.3.0
- read-pkg: ^7.1.0
- type-fest: ^2.5.0
- checksum: 41b8ba4bdb7c1e914aa6ce2d36a7c1651e9086938977fa12f058f6fca51ee15315634af648ca4ef70dd074e575e854616b39032ad0b376e9e97d61a9d0867afe
- languageName: node
- linkType: hard
-
-"read-pkg@npm:^7.1.0":
- version: 7.1.0
- resolution: "read-pkg@npm:7.1.0"
- dependencies:
- "@types/normalize-package-data": ^2.4.1
- normalize-package-data: ^3.0.2
- parse-json: ^5.2.0
- type-fest: ^2.0.0
- checksum: 20d11c59be3ae1fc79d4b9c8594dabeaec58105f9dfd710570ef9690ec2ac929247006e79ca114257683228663199735d60f149948dbc5f34fcd2d28883ab5f7
- languageName: node
- linkType: hard
-
-"readable-stream@npm:^3.1.1, readable-stream@npm:^3.4.0, readable-stream@npm:^3.6.0":
- version: 3.6.2
- resolution: "readable-stream@npm:3.6.2"
- dependencies:
- inherits: ^2.0.3
- string_decoder: ^1.1.1
- util-deprecate: ^1.0.1
- checksum: bdcbe6c22e846b6af075e32cf8f4751c2576238c5043169a1c221c92ee2878458a816a4ea33f4c67623c0b6827c8a400409bfb3cf0bf3381392d0b1dfb52ac8d
- languageName: node
- linkType: hard
-
-"readdirp@npm:~3.6.0":
- version: 3.6.0
- resolution: "readdirp@npm:3.6.0"
- dependencies:
- picomatch: ^2.2.1
- checksum: 1ced032e6e45670b6d7352d71d21ce7edf7b9b928494dcaba6f11fba63180d9da6cd7061ebc34175ffda6ff529f481818c962952004d273178acd70f7059b320
- languageName: node
- linkType: hard
-
-"remark-gfm@npm:^3.0.1":
- version: 3.0.1
- resolution: "remark-gfm@npm:3.0.1"
- dependencies:
- "@types/mdast": ^3.0.0
- mdast-util-gfm: ^2.0.0
- micromark-extension-gfm: ^2.0.0
- unified: ^10.0.0
- checksum: 02254f74d67b3419c2c9cf62d799ec35f6c6cd74db25c001361751991552a7ce86049a972107bff8122d85d15ae4a8d1a0618f3bc01a7df837af021ae9b2a04e
- languageName: node
- linkType: hard
-
-"remark-html@npm:^15.0.1":
- version: 15.0.2
- resolution: "remark-html@npm:15.0.2"
- dependencies:
- "@types/mdast": ^3.0.0
- hast-util-sanitize: ^4.0.0
- hast-util-to-html: ^8.0.0
- mdast-util-to-hast: ^12.0.0
- unified: ^10.0.0
- checksum: 1fe1bf1fd229f5ae999626f274b9e6a972c1406533f5e78392c1994e01ec35511d0d307a927bb22a828e51007c80f12f3befcea73e9f5f01db7bed229a989d54
- languageName: node
- linkType: hard
-
-"remark-parse@npm:^10.0.0":
- version: 10.0.2
- resolution: "remark-parse@npm:10.0.2"
- dependencies:
- "@types/mdast": ^3.0.0
- mdast-util-from-markdown: ^1.0.0
- unified: ^10.0.0
- checksum: 5041b4b44725f377e69986e02f8f072ae2222db5e7d3b6c80829756b842e811343ffc2069cae1f958a96bfa36104ab91a57d7d7e2f0cef521e210ab8c614d5c7
- languageName: node
- linkType: hard
-
-"remark-reference-links@npm:^6.0.1":
- version: 6.0.1
- resolution: "remark-reference-links@npm:6.0.1"
- dependencies:
- "@types/mdast": ^3.0.0
- unified: ^10.0.0
- unist-util-visit: ^4.0.0
- checksum: 9b538e277f6749a25e68f2e6c0862bb0c44204723e92b719c49aacea7d46182ad046f964fa24b6f4b4e545242429c915d073ddc7089081c676a45d85e493beab
- languageName: node
- linkType: hard
-
-"remark-stringify@npm:^10.0.0":
- version: 10.0.3
- resolution: "remark-stringify@npm:10.0.3"
- dependencies:
- "@types/mdast": ^3.0.0
- mdast-util-to-markdown: ^1.0.0
- unified: ^10.0.0
- checksum: 6004e204fba672ee322c3cf0bef090e95802feedf7ef875f88b120c5e6208f1eb09c014486d5ca42a1e199c0a17ce0ed165fb248c66608458afed4bdca51dd3a
- languageName: node
- linkType: hard
-
-"remark-toc@npm:^8.0.1":
- version: 8.0.1
- resolution: "remark-toc@npm:8.0.1"
- dependencies:
- "@types/mdast": ^3.0.0
- mdast-util-toc: ^6.0.0
- unified: ^10.0.0
- checksum: 4a8d6dbde402c9eece0fc8774c1d639fa5cd8d4ba75a7c2ff848f239d60f52e0d055467bbc11b7871508f0ecac13ec8013a89d4131f0b242bc08fb6fec7de517
- languageName: node
- linkType: hard
-
-"remark@npm:^14.0.2":
- version: 14.0.3
- resolution: "remark@npm:14.0.3"
- dependencies:
- "@types/mdast": ^3.0.0
- remark-parse: ^10.0.0
- remark-stringify: ^10.0.0
- unified: ^10.0.0
- checksum: 36eec9668c5f5e497507fa5d396c79183265a5f7dd204a608e7f031a4f61b48f7bb5cfaec212f5614ccd1266cc4a9f8d7a59a45e95aed9876986b4c453b191be
- languageName: node
- linkType: hard
-
-"require-directory@npm:^2.1.1":
- version: 2.1.1
- resolution: "require-directory@npm:2.1.1"
- checksum: fb47e70bf0001fdeabdc0429d431863e9475e7e43ea5f94ad86503d918423c1543361cc5166d713eaa7029dd7a3d34775af04764bebff99ef413111a5af18c80
- languageName: node
- linkType: hard
-
-"resolve-cwd@npm:^3.0.0":
- version: 3.0.0
- resolution: "resolve-cwd@npm:3.0.0"
- dependencies:
- resolve-from: ^5.0.0
- checksum: 546e0816012d65778e580ad62b29e975a642989108d9a3c5beabfb2304192fa3c9f9146fbdfe213563c6ff51975ae41bac1d3c6e047dd9572c94863a057b4d81
- languageName: node
- linkType: hard
-
-"resolve-from@npm:^5.0.0":
- version: 5.0.0
- resolution: "resolve-from@npm:5.0.0"
- checksum: 4ceeb9113e1b1372d0cd969f3468fa042daa1dd9527b1b6bb88acb6ab55d8b9cd65dbf18819f9f9ddf0db804990901dcdaade80a215e7b2c23daae38e64f5bdf
- languageName: node
- linkType: hard
-
-"resolve.exports@npm:^2.0.0":
- version: 2.0.2
- resolution: "resolve.exports@npm:2.0.2"
- checksum: 1c7778ca1b86a94f8ab4055d196c7d87d1874b96df4d7c3e67bbf793140f0717fd506dcafd62785b079cd6086b9264424ad634fb904409764c3509c3df1653f2
- languageName: node
- linkType: hard
-
-"resolve@npm:^1.20.0, resolve@npm:^1.22.1":
- version: 1.22.8
- resolution: "resolve@npm:1.22.8"
- dependencies:
- is-core-module: ^2.13.0
- path-parse: ^1.0.7
- supports-preserve-symlinks-flag: ^1.0.0
- bin:
- resolve: bin/resolve
- checksum: f8a26958aa572c9b064562750b52131a37c29d072478ea32e129063e2da7f83e31f7f11e7087a18225a8561cfe8d2f0df9dbea7c9d331a897571c0a2527dbb4c
- languageName: node
- linkType: hard
-
-"resolve@patch:resolve@^1.20.0#~builtin, resolve@patch:resolve@^1.22.1#~builtin":
- version: 1.22.8
- resolution: "resolve@patch:resolve@npm%3A1.22.8#~builtin::version=1.22.8&hash=c3c19d"
- dependencies:
- is-core-module: ^2.13.0
- path-parse: ^1.0.7
- supports-preserve-symlinks-flag: ^1.0.0
- bin:
- resolve: bin/resolve
- checksum: 5479b7d431cacd5185f8db64bfcb7286ae5e31eb299f4c4f404ad8aa6098b77599563ac4257cb2c37a42f59dfc06a1bec2bcf283bb448f319e37f0feb9a09847
- languageName: node
- linkType: hard
-
-"retry@npm:^0.12.0":
- version: 0.12.0
- resolution: "retry@npm:0.12.0"
- checksum: 623bd7d2e5119467ba66202d733ec3c2e2e26568074923bc0585b6b99db14f357e79bdedb63cab56cec47491c4a0da7e6021a7465ca6dc4f481d3898fdd3158c
- languageName: node
- linkType: hard
-
-"rimraf@npm:^3.0.2":
- version: 3.0.2
- resolution: "rimraf@npm:3.0.2"
- dependencies:
- glob: ^7.1.3
- bin:
- rimraf: bin.js
- checksum: 87f4164e396f0171b0a3386cc1877a817f572148ee13a7e113b238e48e8a9f2f31d009a92ec38a591ff1567d9662c6b67fd8818a2dbbaed74bc26a87a2a4a9a0
- languageName: node
- linkType: hard
-
-"sade@npm:^1.7.3":
- version: 1.8.1
- resolution: "sade@npm:1.8.1"
- dependencies:
- mri: ^1.1.0
- checksum: 0756e5b04c51ccdc8221ebffd1548d0ce5a783a44a0fa9017a026659b97d632913e78f7dca59f2496aa996a0be0b0c322afd87ca72ccd909406f49dbffa0f45d
- languageName: node
- linkType: hard
-
-"safe-buffer@npm:~5.2.0":
- version: 5.2.1
- resolution: "safe-buffer@npm:5.2.1"
- checksum: b99c4b41fdd67a6aaf280fcd05e9ffb0813654894223afb78a31f14a19ad220bba8aba1cb14eddce1fcfb037155fe6de4e861784eb434f7d11ed58d1e70dd491
- languageName: node
- linkType: hard
-
-"safer-buffer@npm:>= 2.1.2 < 3.0.0":
- version: 2.1.2
- resolution: "safer-buffer@npm:2.1.2"
- checksum: cab8f25ae6f1434abee8d80023d7e72b598cf1327164ddab31003c51215526801e40b66c5e65d658a0af1e9d6478cadcb4c745f4bd6751f97d8644786c0978b0
- languageName: node
- linkType: hard
-
-"semver@npm:^6.3.0, semver@npm:^6.3.1":
- version: 6.3.1
- resolution: "semver@npm:6.3.1"
- bin:
- semver: bin/semver.js
- checksum: ae47d06de28836adb9d3e25f22a92943477371292d9b665fb023fae278d345d508ca1958232af086d85e0155aee22e313e100971898bbb8d5d89b8b1d4054ca2
- languageName: node
- linkType: hard
-
-"semver@npm:^7.3.4, semver@npm:^7.3.5, semver@npm:^7.5.3, semver@npm:^7.5.4":
- version: 7.5.4
- resolution: "semver@npm:7.5.4"
- dependencies:
- lru-cache: ^6.0.0
- bin:
- semver: bin/semver.js
- checksum: 12d8ad952fa353b0995bf180cdac205a4068b759a140e5d3c608317098b3575ac2f1e09182206bf2eb26120e1c0ed8fb92c48c592f6099680de56bb071423ca3
- languageName: node
- linkType: hard
-
-"set-blocking@npm:^2.0.0":
- version: 2.0.0
- resolution: "set-blocking@npm:2.0.0"
- checksum: 6e65a05f7cf7ebdf8b7c75b101e18c0b7e3dff4940d480efed8aad3a36a4005140b660fa1d804cb8bce911cac290441dc728084a30504d3516ac2ff7ad607b02
- languageName: node
- linkType: hard
-
-"shebang-command@npm:^2.0.0":
- version: 2.0.0
- resolution: "shebang-command@npm:2.0.0"
- dependencies:
- shebang-regex: ^3.0.0
- checksum: 6b52fe87271c12968f6a054e60f6bde5f0f3d2db483a1e5c3e12d657c488a15474121a1d55cd958f6df026a54374ec38a4a963988c213b7570e1d51575cea7fa
- languageName: node
- linkType: hard
-
-"shebang-regex@npm:^3.0.0":
- version: 3.0.0
- resolution: "shebang-regex@npm:3.0.0"
- checksum: 1a2bcae50de99034fcd92ad4212d8e01eedf52c7ec7830eedcf886622804fe36884278f2be8be0ea5fde3fd1c23911643a4e0f726c8685b61871c8908af01222
- languageName: node
- linkType: hard
-
-"signal-exit@npm:^3.0.3, signal-exit@npm:^3.0.7":
- version: 3.0.7
- resolution: "signal-exit@npm:3.0.7"
- checksum: a2f098f247adc367dffc27845853e9959b9e88b01cb301658cfe4194352d8d2bb32e18467c786a7fe15f1d44b233ea35633d076d5e737870b7139949d1ab6318
- languageName: node
- linkType: hard
-
-"signal-exit@npm:^4.0.1":
- version: 4.1.0
- resolution: "signal-exit@npm:4.1.0"
- checksum: 64c757b498cb8629ffa5f75485340594d2f8189e9b08700e69199069c8e3070fb3e255f7ab873c05dc0b3cec412aea7402e10a5990cb6a050bd33ba062a6c549
- languageName: node
- linkType: hard
-
-"sisteransi@npm:^1.0.5":
- version: 1.0.5
- resolution: "sisteransi@npm:1.0.5"
- checksum: aba6438f46d2bfcef94cf112c835ab395172c75f67453fe05c340c770d3c402363018ae1ab4172a1026a90c47eaccf3af7b6ff6fa749a680c2929bd7fa2b37a4
- languageName: node
- linkType: hard
-
-"slash@npm:^3.0.0":
- version: 3.0.0
- resolution: "slash@npm:3.0.0"
- checksum: 94a93fff615f25a999ad4b83c9d5e257a7280c90a32a7cb8b4a87996e4babf322e469c42b7f649fd5796edd8687652f3fb452a86dc97a816f01113183393f11c
- languageName: node
- linkType: hard
-
-"smart-buffer@npm:^4.2.0":
- version: 4.2.0
- resolution: "smart-buffer@npm:4.2.0"
- checksum: b5167a7142c1da704c0e3af85c402002b597081dd9575031a90b4f229ca5678e9a36e8a374f1814c8156a725d17008ae3bde63b92f9cfd132526379e580bec8b
- languageName: node
- linkType: hard
-
-"socks-proxy-agent@npm:^7.0.0":
- version: 7.0.0
- resolution: "socks-proxy-agent@npm:7.0.0"
- dependencies:
- agent-base: ^6.0.2
- debug: ^4.3.3
- socks: ^2.6.2
- checksum: 720554370154cbc979e2e9ce6a6ec6ced205d02757d8f5d93fe95adae454fc187a5cbfc6b022afab850a5ce9b4c7d73e0f98e381879cf45f66317a4895953846
- languageName: node
- linkType: hard
-
-"socks-proxy-agent@npm:^8.0.1":
- version: 8.0.2
- resolution: "socks-proxy-agent@npm:8.0.2"
- dependencies:
- agent-base: ^7.0.2
- debug: ^4.3.4
- socks: ^2.7.1
- checksum: 4fb165df08f1f380881dcd887b3cdfdc1aba3797c76c1e9f51d29048be6e494c5b06d68e7aea2e23df4572428f27a3ec22b3d7c75c570c5346507433899a4b6d
- languageName: node
- linkType: hard
-
-"socks@npm:^2.6.2, socks@npm:^2.7.1":
- version: 2.7.1
- resolution: "socks@npm:2.7.1"
- dependencies:
- ip: ^2.0.0
- smart-buffer: ^4.2.0
- checksum: 259d9e3e8e1c9809a7f5c32238c3d4d2a36b39b83851d0f573bfde5f21c4b1288417ce1af06af1452569cd1eb0841169afd4998f0e04ba04656f6b7f0e46d748
- languageName: node
- linkType: hard
-
-"source-map-js@npm:^1.0.2":
- version: 1.0.2
- resolution: "source-map-js@npm:1.0.2"
- checksum: c049a7fc4deb9a7e9b481ae3d424cc793cb4845daa690bc5a05d428bf41bf231ced49b4cf0c9e77f9d42fdb3d20d6187619fc586605f5eabe995a316da8d377c
- languageName: node
- linkType: hard
-
-"source-map-support@npm:0.5.13":
- version: 0.5.13
- resolution: "source-map-support@npm:0.5.13"
- dependencies:
- buffer-from: ^1.0.0
- source-map: ^0.6.0
- checksum: 933550047b6c1a2328599a21d8b7666507427c0f5ef5eaadd56b5da0fd9505e239053c66fe181bf1df469a3b7af9d775778eee283cbb7ae16b902ddc09e93a97
- languageName: node
- linkType: hard
-
-"source-map@npm:^0.6.0, source-map@npm:^0.6.1":
- version: 0.6.1
- resolution: "source-map@npm:0.6.1"
- checksum: 59ce8640cf3f3124f64ac289012c2b8bd377c238e316fb323ea22fbfe83da07d81e000071d7242cad7a23cd91c7de98e4df8830ec3f133cb6133a5f6e9f67bc2
- languageName: node
- linkType: hard
-
-"space-separated-tokens@npm:^2.0.0":
- version: 2.0.2
- resolution: "space-separated-tokens@npm:2.0.2"
- checksum: 202e97d7ca1ba0758a0aa4fe226ff98142073bcceeff2da3aad037968878552c3bbce3b3231970025375bbba5aee00c5b8206eda408da837ab2dc9c0f26be990
- languageName: node
- linkType: hard
-
-"spdx-correct@npm:^3.0.0":
- version: 3.2.0
- resolution: "spdx-correct@npm:3.2.0"
- dependencies:
- spdx-expression-parse: ^3.0.0
- spdx-license-ids: ^3.0.0
- checksum: e9ae98d22f69c88e7aff5b8778dc01c361ef635580e82d29e5c60a6533cc8f4d820803e67d7432581af0cc4fb49973125076ee3b90df191d153e223c004193b2
- languageName: node
- linkType: hard
-
-"spdx-exceptions@npm:^2.1.0":
- version: 2.3.0
- resolution: "spdx-exceptions@npm:2.3.0"
- checksum: cb69a26fa3b46305637123cd37c85f75610e8c477b6476fa7354eb67c08128d159f1d36715f19be6f9daf4b680337deb8c65acdcae7f2608ba51931540687ac0
- languageName: node
- linkType: hard
-
-"spdx-expression-parse@npm:^3.0.0":
- version: 3.0.1
- resolution: "spdx-expression-parse@npm:3.0.1"
- dependencies:
- spdx-exceptions: ^2.1.0
- spdx-license-ids: ^3.0.0
- checksum: a1c6e104a2cbada7a593eaa9f430bd5e148ef5290d4c0409899855ce8b1c39652bcc88a725259491a82601159d6dc790bedefc9016c7472f7de8de7361f8ccde
- languageName: node
- linkType: hard
-
-"spdx-license-ids@npm:^3.0.0":
- version: 3.0.16
- resolution: "spdx-license-ids@npm:3.0.16"
- checksum: 5cdaa85aaa24bd02f9353a2e357b4df0a4f205cb35655f3fd0a5674a4fb77081f28ffd425379214bc3be2c2b7593ce1215df6bcc75884aeee0a9811207feabe2
- languageName: node
- linkType: hard
-
-"sprintf-js@npm:~1.0.2":
- version: 1.0.3
- resolution: "sprintf-js@npm:1.0.3"
- checksum: 19d79aec211f09b99ec3099b5b2ae2f6e9cdefe50bc91ac4c69144b6d3928a640bb6ae5b3def70c2e85a2c3d9f5ec2719921e3a59d3ca3ef4b2fd1a4656a0df3
- languageName: node
- linkType: hard
-
-"ssri@npm:^10.0.0":
- version: 10.0.5
- resolution: "ssri@npm:10.0.5"
- dependencies:
- minipass: ^7.0.3
- checksum: 0a31b65f21872dea1ed3f7c200d7bc1c1b91c15e419deca14f282508ba917cbb342c08a6814c7f68ca4ca4116dd1a85da2bbf39227480e50125a1ceffeecb750
- languageName: node
- linkType: hard
-
-"ssri@npm:^9.0.0":
- version: 9.0.1
- resolution: "ssri@npm:9.0.1"
- dependencies:
- minipass: ^3.1.1
- checksum: fb58f5e46b6923ae67b87ad5ef1c5ab6d427a17db0bead84570c2df3cd50b4ceb880ebdba2d60726588272890bae842a744e1ecce5bd2a2a582fccd5068309eb
- languageName: node
- linkType: hard
-
-"stack-utils@npm:^2.0.3":
- version: 2.0.6
- resolution: "stack-utils@npm:2.0.6"
- dependencies:
- escape-string-regexp: ^2.0.0
- checksum: 052bf4d25bbf5f78e06c1d5e67de2e088b06871fa04107ca8d3f0e9d9263326e2942c8bedee3545795fc77d787d443a538345eef74db2f8e35db3558c6f91ff7
- languageName: node
- linkType: hard
-
-"string-length@npm:^4.0.1":
- version: 4.0.2
- resolution: "string-length@npm:4.0.2"
- dependencies:
- char-regex: ^1.0.2
- strip-ansi: ^6.0.0
- checksum: ce85533ef5113fcb7e522bcf9e62cb33871aa99b3729cec5595f4447f660b0cefd542ca6df4150c97a677d58b0cb727a3fe09ac1de94071d05526c73579bf505
- languageName: node
- linkType: hard
-
-"string-width-cjs@npm:string-width@^4.2.0, string-width@npm:^1.0.2 || 2 || 3 || 4, string-width@npm:^4.1.0, string-width@npm:^4.2.0, string-width@npm:^4.2.3":
- version: 4.2.3
- resolution: "string-width@npm:4.2.3"
- dependencies:
- emoji-regex: ^8.0.0
- is-fullwidth-code-point: ^3.0.0
- strip-ansi: ^6.0.1
- checksum: e52c10dc3fbfcd6c3a15f159f54a90024241d0f149cf8aed2982a2d801d2e64df0bf1dc351cf8e95c3319323f9f220c16e740b06faecd53e2462df1d2b5443fb
- languageName: node
- linkType: hard
-
-"string-width@npm:^5.0.0, string-width@npm:^5.0.1, string-width@npm:^5.1.2":
- version: 5.1.2
- resolution: "string-width@npm:5.1.2"
- dependencies:
- eastasianwidth: ^0.2.0
- emoji-regex: ^9.2.2
- strip-ansi: ^7.0.1
- checksum: 7369deaa29f21dda9a438686154b62c2c5f661f8dda60449088f9f980196f7908fc39fdd1803e3e01541970287cf5deae336798337e9319a7055af89dafa7193
- languageName: node
- linkType: hard
-
-"string_decoder@npm:^1.1.1":
- version: 1.3.0
- resolution: "string_decoder@npm:1.3.0"
- dependencies:
- safe-buffer: ~5.2.0
- checksum: 8417646695a66e73aefc4420eb3b84cc9ffd89572861fe004e6aeb13c7bc00e2f616247505d2dbbef24247c372f70268f594af7126f43548565c68c117bdeb56
- languageName: node
- linkType: hard
-
-"stringify-entities@npm:^4.0.0":
- version: 4.0.3
- resolution: "stringify-entities@npm:4.0.3"
- dependencies:
- character-entities-html4: ^2.0.0
- character-entities-legacy: ^3.0.0
- checksum: 59e8f523b403bf7d415690e72ae52982decd6ea5426bd8b3f5c66225ddde73e766c0c0d91627df082d0794e30b19dd907ffb5864cef3602e4098d6777d7ca3c2
- languageName: node
- linkType: hard
-
-"strip-ansi-cjs@npm:strip-ansi@^6.0.1, strip-ansi@npm:^6.0.0, strip-ansi@npm:^6.0.1":
- version: 6.0.1
- resolution: "strip-ansi@npm:6.0.1"
- dependencies:
- ansi-regex: ^5.0.1
- checksum: f3cd25890aef3ba6e1a74e20896c21a46f482e93df4a06567cebf2b57edabb15133f1f94e57434e0a958d61186087b1008e89c94875d019910a213181a14fc8c
- languageName: node
- linkType: hard
-
-"strip-ansi@npm:^7.0.1":
- version: 7.1.0
- resolution: "strip-ansi@npm:7.1.0"
- dependencies:
- ansi-regex: ^6.0.1
- checksum: 859c73fcf27869c22a4e4d8c6acfe690064659e84bef9458aa6d13719d09ca88dcfd40cbf31fd0be63518ea1a643fe070b4827d353e09533a5b0b9fd4553d64d
- languageName: node
- linkType: hard
-
-"strip-bom@npm:^4.0.0":
- version: 4.0.0
- resolution: "strip-bom@npm:4.0.0"
- checksum: 9dbcfbaf503c57c06af15fe2c8176fb1bf3af5ff65003851a102749f875a6dbe0ab3b30115eccf6e805e9d756830d3e40ec508b62b3f1ddf3761a20ebe29d3f3
- languageName: node
- linkType: hard
-
-"strip-final-newline@npm:^2.0.0":
- version: 2.0.0
- resolution: "strip-final-newline@npm:2.0.0"
- checksum: 69412b5e25731e1938184b5d489c32e340605bb611d6140344abc3421b7f3c6f9984b21dff296dfcf056681b82caa3bb4cc996a965ce37bcfad663e92eae9c64
- languageName: node
- linkType: hard
-
-"strip-json-comments@npm:^3.1.1":
- version: 3.1.1
- resolution: "strip-json-comments@npm:3.1.1"
- checksum: 492f73e27268f9b1c122733f28ecb0e7e8d8a531a6662efbd08e22cccb3f9475e90a1b82cab06a392f6afae6d2de636f977e231296400d0ec5304ba70f166443
- languageName: node
- linkType: hard
-
-"strip-json-comments@npm:^5.0.0":
- version: 5.0.1
- resolution: "strip-json-comments@npm:5.0.1"
- checksum: b314af70c6666a71133e309a571bdb87687fc878d9fd8b38ebed393a77b89835b92f191aa6b0bc10dfd028ba99eed6b6365985001d64c5aef32a4a82456a156b
- languageName: node
- linkType: hard
-
-"supports-color@npm:^5.3.0":
- version: 5.5.0
- resolution: "supports-color@npm:5.5.0"
- dependencies:
- has-flag: ^3.0.0
- checksum: 95f6f4ba5afdf92f495b5a912d4abee8dcba766ae719b975c56c084f5004845f6f5a5f7769f52d53f40e21952a6d87411bafe34af4a01e65f9926002e38e1dac
- languageName: node
- linkType: hard
-
-"supports-color@npm:^7.1.0":
- version: 7.2.0
- resolution: "supports-color@npm:7.2.0"
- dependencies:
- has-flag: ^4.0.0
- checksum: 3dda818de06ebbe5b9653e07842d9479f3555ebc77e9a0280caf5a14fb877ffee9ed57007c3b78f5a6324b8dbeec648d9e97a24e2ed9fdb81ddc69ea07100f4a
- languageName: node
- linkType: hard
-
-"supports-color@npm:^8.0.0":
- version: 8.1.1
- resolution: "supports-color@npm:8.1.1"
- dependencies:
- has-flag: ^4.0.0
- checksum: c052193a7e43c6cdc741eb7f378df605636e01ad434badf7324f17fb60c69a880d8d8fcdcb562cf94c2350e57b937d7425ab5b8326c67c2adc48f7c87c1db406
- languageName: node
- linkType: hard
-
-"supports-color@npm:^9.0.0":
- version: 9.4.0
- resolution: "supports-color@npm:9.4.0"
- checksum: cb8ff8daeaf1db642156f69a9aa545b6c01dd9c4def4f90a49f46cbf24be0c245d392fcf37acd119cd1819b99dad2cc9b7e3260813f64bcfd7f5b18b5a1eefb8
- languageName: node
- linkType: hard
-
-"supports-preserve-symlinks-flag@npm:^1.0.0":
- version: 1.0.0
- resolution: "supports-preserve-symlinks-flag@npm:1.0.0"
- checksum: 53b1e247e68e05db7b3808b99b892bd36fb096e6fba213a06da7fab22045e97597db425c724f2bbd6c99a3c295e1e73f3e4de78592289f38431049e1277ca0ae
- languageName: node
- linkType: hard
-
-"tar-fs@npm:^2.1.0":
- version: 2.1.1
- resolution: "tar-fs@npm:2.1.1"
- dependencies:
- chownr: ^1.1.1
- mkdirp-classic: ^0.5.2
- pump: ^3.0.0
- tar-stream: ^2.1.4
- checksum: f5b9a70059f5b2969e65f037b4e4da2daf0fa762d3d232ffd96e819e3f94665dbbbe62f76f084f1acb4dbdcce16c6e4dac08d12ffc6d24b8d76720f4d9cf032d
- languageName: node
- linkType: hard
-
-"tar-stream@npm:^2.1.4":
- version: 2.2.0
- resolution: "tar-stream@npm:2.2.0"
- dependencies:
- bl: ^4.0.3
- end-of-stream: ^1.4.1
- fs-constants: ^1.0.0
- inherits: ^2.0.3
- readable-stream: ^3.1.1
- checksum: 699831a8b97666ef50021c767f84924cfee21c142c2eb0e79c63254e140e6408d6d55a065a2992548e72b06de39237ef2b802b99e3ece93ca3904a37622a66f3
- languageName: node
- linkType: hard
-
-"tar@npm:^6.1.11, tar@npm:^6.1.2":
- version: 6.2.1
- resolution: "tar@npm:6.2.1"
- dependencies:
- chownr: ^2.0.0
- fs-minipass: ^2.0.0
- minipass: ^5.0.0
- minizlib: ^2.1.1
- mkdirp: ^1.0.3
- yallist: ^4.0.0
- checksum: f1322768c9741a25356c11373bce918483f40fa9a25c69c59410c8a1247632487edef5fe76c5f12ac51a6356d2f1829e96d2bc34098668a2fc34d76050ac2b6c
- languageName: node
- linkType: hard
-
-"test-exclude@npm:^6.0.0":
- version: 6.0.0
- resolution: "test-exclude@npm:6.0.0"
- dependencies:
- "@istanbuljs/schema": ^0.1.2
- glob: ^7.1.4
- minimatch: ^3.0.4
- checksum: 3b34a3d77165a2cb82b34014b3aba93b1c4637a5011807557dc2f3da826c59975a5ccad765721c4648b39817e3472789f9b0fa98fc854c5c1c7a1e632aacdc28
- languageName: node
- linkType: hard
-
-"tmpl@npm:1.0.5":
- version: 1.0.5
- resolution: "tmpl@npm:1.0.5"
- checksum: cd922d9b853c00fe414c5a774817be65b058d54a2d01ebb415840960406c669a0fc632f66df885e24cb022ec812739199ccbdb8d1164c3e513f85bfca5ab2873
- languageName: node
- linkType: hard
-
-"to-fast-properties@npm:^2.0.0":
- version: 2.0.0
- resolution: "to-fast-properties@npm:2.0.0"
- checksum: be2de62fe58ead94e3e592680052683b1ec986c72d589e7b21e5697f8744cdbf48c266fa72f6c15932894c10187b5f54573a3bcf7da0bfd964d5caf23d436168
- languageName: node
- linkType: hard
-
-"to-regex-range@npm:^5.0.1":
- version: 5.0.1
- resolution: "to-regex-range@npm:5.0.1"
- dependencies:
- is-number: ^7.0.0
- checksum: f76fa01b3d5be85db6a2a143e24df9f60dd047d151062d0ba3df62953f2f697b16fe5dad9b0ac6191c7efc7b1d9dcaa4b768174b7b29da89d4428e64bc0a20ed
- languageName: node
- linkType: hard
-
-"trim-lines@npm:^3.0.0":
- version: 3.0.1
- resolution: "trim-lines@npm:3.0.1"
- checksum: e241da104682a0e0d807222cc1496b92e716af4db7a002f4aeff33ae6a0024fef93165d49eab11aa07c71e1347c42d46563f91dfaa4d3fb945aa535cdead53ed
- languageName: node
- linkType: hard
-
-"trough@npm:^2.0.0":
- version: 2.1.0
- resolution: "trough@npm:2.1.0"
- checksum: a577bb561c2b401cc0e1d9e188fcfcdf63b09b151ff56a668da12197fe97cac15e3d77d5b51f426ccfd94255744a9118e9e9935afe81a3644fa1be9783c82886
- languageName: node
- linkType: hard
-
-"type-detect@npm:4.0.8":
- version: 4.0.8
- resolution: "type-detect@npm:4.0.8"
- checksum: 62b5628bff67c0eb0b66afa371bd73e230399a8d2ad30d852716efcc4656a7516904570cd8631a49a3ce57c10225adf5d0cbdcb47f6b0255fe6557c453925a15
- languageName: node
- linkType: hard
-
-"type-fest@npm:^0.21.3":
- version: 0.21.3
- resolution: "type-fest@npm:0.21.3"
- checksum: e6b32a3b3877f04339bae01c193b273c62ba7bfc9e325b8703c4ee1b32dc8fe4ef5dfa54bf78265e069f7667d058e360ae0f37be5af9f153b22382cd55a9afe0
- languageName: node
- linkType: hard
-
-"type-fest@npm:^2.0.0, type-fest@npm:^2.5.0":
- version: 2.19.0
- resolution: "type-fest@npm:2.19.0"
- checksum: a4ef07ece297c9fba78fc1bd6d85dff4472fe043ede98bd4710d2615d15776902b595abf62bd78339ed6278f021235fb28a96361f8be86ed754f778973a0d278
- languageName: node
- linkType: hard
-
-"unc-path-regex@npm:^0.1.2":
- version: 0.1.2
- resolution: "unc-path-regex@npm:0.1.2"
- checksum: a05fa2006bf4606051c10fc7968f08ce7b28fa646befafa282813aeb1ac1a56f65cb1b577ca7851af2726198d59475bb49b11776036257b843eaacee2860a4ec
- languageName: node
- linkType: hard
-
-"undici-types@npm:~5.26.4":
- version: 5.26.5
- resolution: "undici-types@npm:5.26.5"
- checksum: 3192ef6f3fd5df652f2dc1cd782b49d6ff14dc98e5dced492aa8a8c65425227da5da6aafe22523c67f035a272c599bb89cfe803c1db6311e44bed3042fc25487
- languageName: node
- linkType: hard
-
-"unified@npm:^10.0.0":
- version: 10.1.2
- resolution: "unified@npm:10.1.2"
- dependencies:
- "@types/unist": ^2.0.0
- bail: ^2.0.0
- extend: ^3.0.0
- is-buffer: ^2.0.0
- is-plain-obj: ^4.0.0
- trough: ^2.0.0
- vfile: ^5.0.0
- checksum: 053e7c65ede644607f87bd625a299e4b709869d2f76ec8138569e6e886903b6988b21cd9699e471eda42bee189527be0a9dac05936f1d069a5e65d0125d5d756
- languageName: node
- linkType: hard
-
-"unique-filename@npm:^2.0.0":
- version: 2.0.1
- resolution: "unique-filename@npm:2.0.1"
- dependencies:
- unique-slug: ^3.0.0
- checksum: 807acf3381aff319086b64dc7125a9a37c09c44af7620bd4f7f3247fcd5565660ac12d8b80534dcbfd067e6fe88a67e621386dd796a8af828d1337a8420a255f
- languageName: node
- linkType: hard
-
-"unique-filename@npm:^3.0.0":
- version: 3.0.0
- resolution: "unique-filename@npm:3.0.0"
- dependencies:
- unique-slug: ^4.0.0
- checksum: 8e2f59b356cb2e54aab14ff98a51ac6c45781d15ceaab6d4f1c2228b780193dc70fae4463ce9e1df4479cb9d3304d7c2043a3fb905bdeca71cc7e8ce27e063df
- languageName: node
- linkType: hard
-
-"unique-slug@npm:^3.0.0":
- version: 3.0.0
- resolution: "unique-slug@npm:3.0.0"
- dependencies:
- imurmurhash: ^0.1.4
- checksum: 49f8d915ba7f0101801b922062ee46b7953256c93ceca74303bd8e6413ae10aa7e8216556b54dc5382895e8221d04f1efaf75f945c2e4a515b4139f77aa6640c
- languageName: node
- linkType: hard
-
-"unique-slug@npm:^4.0.0":
- version: 4.0.0
- resolution: "unique-slug@npm:4.0.0"
- dependencies:
- imurmurhash: ^0.1.4
- checksum: 0884b58365af59f89739e6f71e3feacb5b1b41f2df2d842d0757933620e6de08eff347d27e9d499b43c40476cbaf7988638d3acb2ffbcb9d35fd035591adfd15
- languageName: node
- linkType: hard
-
-"unist-builder@npm:^3.0.0":
- version: 3.0.1
- resolution: "unist-builder@npm:3.0.1"
- dependencies:
- "@types/unist": ^2.0.0
- checksum: d8c42fe69aa55a3e9aed3c581007ec5371349bf9885bfa8b0b787634f8d12fa5081f066b205ded379b6d0aeaa884039bae9ebb65a3e71784005fb110aef30d0f
- languageName: node
- linkType: hard
-
-"unist-util-generated@npm:^2.0.0":
- version: 2.0.1
- resolution: "unist-util-generated@npm:2.0.1"
- checksum: 6221ad0571dcc9c8964d6b054f39ef6571ed59cc0ce3e88ae97ea1c70afe76b46412a5ffaa91f96814644ac8477e23fb1b477d71f8d70e625728c5258f5c0d99
- languageName: node
- linkType: hard
-
-"unist-util-is@npm:^5.0.0":
- version: 5.2.1
- resolution: "unist-util-is@npm:5.2.1"
- dependencies:
- "@types/unist": ^2.0.0
- checksum: ae76fdc3d35352cd92f1bedc3a0d407c3b9c42599a52ab9141fe89bdd786b51f0ec5a2ab68b93fb532e239457cae62f7e39eaa80229e1cb94875da2eafcbe5c4
- languageName: node
- linkType: hard
-
-"unist-util-position@npm:^4.0.0":
- version: 4.0.4
- resolution: "unist-util-position@npm:4.0.4"
- dependencies:
- "@types/unist": ^2.0.0
- checksum: e7487b6cec9365299695e3379ded270a1717074fa11fd2407c9b934fb08db6fe1d9077ddeaf877ecf1813665f8ccded5171693d3d9a7a01a125ec5cdd5e88691
- languageName: node
- linkType: hard
-
-"unist-util-stringify-position@npm:^3.0.0":
- version: 3.0.3
- resolution: "unist-util-stringify-position@npm:3.0.3"
- dependencies:
- "@types/unist": ^2.0.0
- checksum: dbd66c15183607ca942a2b1b7a9f6a5996f91c0d30cf8966fb88955a02349d9eefd3974e9010ee67e71175d784c5a9fea915b0aa0b0df99dcb921b95c4c9e124
- languageName: node
- linkType: hard
-
-"unist-util-visit-parents@npm:^5.0.0, unist-util-visit-parents@npm:^5.1.1":
- version: 5.1.3
- resolution: "unist-util-visit-parents@npm:5.1.3"
- dependencies:
- "@types/unist": ^2.0.0
- unist-util-is: ^5.0.0
- checksum: 8ecada5978994f846b64658cf13b4092cd78dea39e1ba2f5090a5de842ba4852712c02351a8ae95250c64f864635e7b02aedf3b4a093552bb30cf1bd160efbaa
- languageName: node
- linkType: hard
-
-"unist-util-visit@npm:^4.0.0, unist-util-visit@npm:^4.1.0":
- version: 4.1.2
- resolution: "unist-util-visit@npm:4.1.2"
- dependencies:
- "@types/unist": ^2.0.0
- unist-util-is: ^5.0.0
- unist-util-visit-parents: ^5.1.1
- checksum: 95a34e3f7b5b2d4b68fd722b6229972099eb97b6df18913eda44a5c11df8b1e27efe7206dd7b88c4ed244a48c474a5b2e2629ab79558ff9eb936840295549cee
- languageName: node
- linkType: hard
-
-"update-browserslist-db@npm:^1.0.13":
- version: 1.0.13
- resolution: "update-browserslist-db@npm:1.0.13"
- dependencies:
- escalade: ^3.1.1
- picocolors: ^1.0.0
- peerDependencies:
- browserslist: ">= 4.21.0"
- bin:
- update-browserslist-db: cli.js
- checksum: 1e47d80182ab6e4ad35396ad8b61008ae2a1330221175d0abd37689658bdb61af9b705bfc41057fd16682474d79944fb2d86767c5ed5ae34b6276b9bed353322
- languageName: node
- linkType: hard
-
-"util-deprecate@npm:^1.0.1":
- version: 1.0.2
- resolution: "util-deprecate@npm:1.0.2"
- checksum: 474acf1146cb2701fe3b074892217553dfcf9a031280919ba1b8d651a068c9b15d863b7303cb15bd00a862b498e6cf4ad7b4a08fb134edd5a6f7641681cb54a2
- languageName: node
- linkType: hard
-
-"util-extend@npm:^1.0.1":
- version: 1.0.3
- resolution: "util-extend@npm:1.0.3"
- checksum: da57f399b331f40fe2cea5409b1f4939231433db9b52dac5593e4390a98b7b0d1318a0daefbcc48123fffe5026ef49f418b3e4df7a4cd7649a2583e559c608a5
- languageName: node
- linkType: hard
-
-"uvu@npm:^0.5.0":
- version: 0.5.6
- resolution: "uvu@npm:0.5.6"
- dependencies:
- dequal: ^2.0.0
- diff: ^5.0.0
- kleur: ^4.0.3
- sade: ^1.7.3
- bin:
- uvu: bin.js
- checksum: 09460a37975627de9fcad396e5078fb844d01aaf64a6399ebfcfd9e55f1c2037539b47611e8631f89be07656962af0cf48c334993db82b9ae9c3d25ce3862168
- languageName: node
- linkType: hard
-
-"v8-to-istanbul@npm:^9.0.1":
- version: 9.2.0
- resolution: "v8-to-istanbul@npm:9.2.0"
- dependencies:
- "@jridgewell/trace-mapping": ^0.3.12
- "@types/istanbul-lib-coverage": ^2.0.1
- convert-source-map: ^2.0.0
- checksum: 31ef98c6a31b1dab6be024cf914f235408cd4c0dc56a5c744a5eea1a9e019ba279e1b6f90d695b78c3186feed391ed492380ccf095009e2eb91f3d058f0b4491
- languageName: node
- linkType: hard
-
-"validate-npm-package-license@npm:^3.0.1":
- version: 3.0.4
- resolution: "validate-npm-package-license@npm:3.0.4"
- dependencies:
- spdx-correct: ^3.0.0
- spdx-expression-parse: ^3.0.0
- checksum: 35703ac889d419cf2aceef63daeadbe4e77227c39ab6287eeb6c1b36a746b364f50ba22e88591f5d017bc54685d8137bc2d328d0a896e4d3fd22093c0f32a9ad
- languageName: node
- linkType: hard
-
-"vfile-location@npm:^4.0.0":
- version: 4.1.0
- resolution: "vfile-location@npm:4.1.0"
- dependencies:
- "@types/unist": ^2.0.0
- vfile: ^5.0.0
- checksum: c894e8e5224170d1f85288f4a1d1ebcee0780823ea2b49d881648ab360ebf01b37ecb09b1c4439a75f9a51f31a9f9742cd045e987763e367c352a1ef7c50d446
- languageName: node
- linkType: hard
-
-"vfile-message@npm:^3.0.0":
- version: 3.1.4
- resolution: "vfile-message@npm:3.1.4"
- dependencies:
- "@types/unist": ^2.0.0
- unist-util-stringify-position: ^3.0.0
- checksum: d0ee7da1973ad76513c274e7912adbed4d08d180eaa34e6bd40bc82459f4b7bc50fcaff41556135e3339995575eac5f6f709aba9332b80f775618ea4880a1367
- languageName: node
- linkType: hard
-
-"vfile-reporter@npm:^7.0.4":
- version: 7.0.5
- resolution: "vfile-reporter@npm:7.0.5"
- dependencies:
- "@types/supports-color": ^8.0.0
- string-width: ^5.0.0
- supports-color: ^9.0.0
- unist-util-stringify-position: ^3.0.0
- vfile: ^5.0.0
- vfile-message: ^3.0.0
- vfile-sort: ^3.0.0
- vfile-statistics: ^2.0.0
- checksum: 0d66370c6c821fbc850c898bfc48c73f19fb320792c532a3af0456bd0f3d395590b365009e60ca4c08ab09a0dabdd43311297bb5c6fbd0abb90bb5abce98264e
- languageName: node
- linkType: hard
-
-"vfile-sort@npm:^3.0.0":
- version: 3.0.1
- resolution: "vfile-sort@npm:3.0.1"
- dependencies:
- vfile: ^5.0.0
- vfile-message: ^3.0.0
- checksum: 6a29e0513c03b3468c628cc27d1511e2f955c3095cd65eeddcb8f601b0972c0cb1f2dc008a7c760e217cf97a44e04e0331b00929b83adc6661b46043b03b5a24
- languageName: node
- linkType: hard
-
-"vfile-statistics@npm:^2.0.0":
- version: 2.0.1
- resolution: "vfile-statistics@npm:2.0.1"
- dependencies:
- vfile: ^5.0.0
- vfile-message: ^3.0.0
- checksum: e3f731bcf992c61c1231a0793785b1288e0a004be9e18ff147e3ead901ae2d21723358609bfe0565881ffe202af68cb171b49753fc8b4bd7a30337aaef256266
- languageName: node
- linkType: hard
-
-"vfile@npm:^5.0.0, vfile@npm:^5.3.4":
- version: 5.3.7
- resolution: "vfile@npm:5.3.7"
- dependencies:
- "@types/unist": ^2.0.0
- is-buffer: ^2.0.0
- unist-util-stringify-position: ^3.0.0
- vfile-message: ^3.0.0
- checksum: 642cce703afc186dbe7cabf698dc954c70146e853491086f5da39e1ce850676fc96b169fcf7898aa3ff245e9313aeec40da93acd1e1fcc0c146dc4f6308b4ef9
- languageName: node
- linkType: hard
-
-"vue-template-compiler@npm:^2.7.8":
- version: 2.7.16
- resolution: "vue-template-compiler@npm:2.7.16"
- dependencies:
- de-indent: ^1.0.2
- he: ^1.2.0
- checksum: a0d52ecbb99bad37f370341b5c594c5caa1f72b15b3f225148ef378fc06aa25c93185ef061f7e6e5e443c9067e70d8f158742716112acf84088932ebcc49ad10
- languageName: node
- linkType: hard
-
-"walker@npm:^1.0.8":
- version: 1.0.8
- resolution: "walker@npm:1.0.8"
- dependencies:
- makeerror: 1.0.12
- checksum: ad7a257ea1e662e57ef2e018f97b3c02a7240ad5093c392186ce0bcf1f1a60bbadd520d073b9beb921ed99f64f065efb63dfc8eec689a80e569f93c1c5d5e16c
- languageName: node
- linkType: hard
-
-"web-namespaces@npm:^2.0.0":
- version: 2.0.1
- resolution: "web-namespaces@npm:2.0.1"
- checksum: b6d9f02f1a43d0ef0848a812d89c83801d5bbad57d8bb61f02eb6d7eb794c3736f6cc2e1191664bb26136594c8218ac609f4069722c6f56d9fc2d808fa9271c6
- languageName: node
- linkType: hard
-
-"which@npm:^2.0.1, which@npm:^2.0.2":
- version: 2.0.2
- resolution: "which@npm:2.0.2"
- dependencies:
- isexe: ^2.0.0
- bin:
- node-which: ./bin/node-which
- checksum: 1a5c563d3c1b52d5f893c8b61afe11abc3bab4afac492e8da5bde69d550de701cf9806235f20a47b5c8fa8a1d6a9135841de2596535e998027a54589000e66d1
- languageName: node
- linkType: hard
-
-"which@npm:^4.0.0":
- version: 4.0.0
- resolution: "which@npm:4.0.0"
- dependencies:
- isexe: ^3.1.1
- bin:
- node-which: bin/which.js
- checksum: f17e84c042592c21e23c8195108cff18c64050b9efb8459589116999ea9da6dd1509e6a1bac3aeebefd137be00fabbb61b5c2bc0aa0f8526f32b58ee2f545651
- languageName: node
- linkType: hard
-
-"wide-align@npm:^1.1.5":
- version: 1.1.5
- resolution: "wide-align@npm:1.1.5"
- dependencies:
- string-width: ^1.0.2 || 2 || 3 || 4
- checksum: d5fc37cd561f9daee3c80e03b92ed3e84d80dde3365a8767263d03dacfc8fa06b065ffe1df00d8c2a09f731482fcacae745abfbb478d4af36d0a891fad4834d3
- languageName: node
- linkType: hard
-
-"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0, wrap-ansi@npm:^7.0.0":
- version: 7.0.0
- resolution: "wrap-ansi@npm:7.0.0"
- dependencies:
- ansi-styles: ^4.0.0
- string-width: ^4.1.0
- strip-ansi: ^6.0.0
- checksum: a790b846fd4505de962ba728a21aaeda189b8ee1c7568ca5e817d85930e06ef8d1689d49dbf0e881e8ef84436af3a88bc49115c2e2788d841ff1b8b5b51a608b
- languageName: node
- linkType: hard
-
-"wrap-ansi@npm:^8.1.0":
- version: 8.1.0
- resolution: "wrap-ansi@npm:8.1.0"
- dependencies:
- ansi-styles: ^6.1.0
- string-width: ^5.0.1
- strip-ansi: ^7.0.1
- checksum: 371733296dc2d616900ce15a0049dca0ef67597d6394c57347ba334393599e800bab03c41d4d45221b6bc967b8c453ec3ae4749eff3894202d16800fdfe0e238
- languageName: node
- linkType: hard
-
-"wrappy@npm:1":
- version: 1.0.2
- resolution: "wrappy@npm:1.0.2"
- checksum: 159da4805f7e84a3d003d8841557196034155008f817172d4e986bd591f74aa82aa7db55929a54222309e01079a65a92a9e6414da5a6aa4b01ee44a511ac3ee5
- languageName: node
- linkType: hard
-
-"write-file-atomic@npm:^4.0.2":
- version: 4.0.2
- resolution: "write-file-atomic@npm:4.0.2"
- dependencies:
- imurmurhash: ^0.1.4
- signal-exit: ^3.0.7
- checksum: 5da60bd4eeeb935eec97ead3df6e28e5917a6bd317478e4a85a5285e8480b8ed96032bbcc6ecd07b236142a24f3ca871c924ec4a6575e623ec1b11bf8c1c253c
- languageName: node
- linkType: hard
-
-"y18n@npm:^5.0.5":
- version: 5.0.8
- resolution: "y18n@npm:5.0.8"
- checksum: 54f0fb95621ee60898a38c572c515659e51cc9d9f787fb109cef6fde4befbe1c4602dc999d30110feee37456ad0f1660fa2edcfde6a9a740f86a290999550d30
- languageName: node
- linkType: hard
-
-"yallist@npm:^3.0.2":
- version: 3.1.1
- resolution: "yallist@npm:3.1.1"
- checksum: 48f7bb00dc19fc635a13a39fe547f527b10c9290e7b3e836b9a8f1ca04d4d342e85714416b3c2ab74949c9c66f9cebb0473e6bc353b79035356103b47641285d
- languageName: node
- linkType: hard
-
-"yallist@npm:^4.0.0":
- version: 4.0.0
- resolution: "yallist@npm:4.0.0"
- checksum: 343617202af32df2a15a3be36a5a8c0c8545208f3d3dfbc6bb7c3e3b7e8c6f8e7485432e4f3b88da3031a6e20afa7c711eded32ddfb122896ac5d914e75848d5
- languageName: node
- linkType: hard
-
-"yargs-parser@npm:^21.1.1":
- version: 21.1.1
- resolution: "yargs-parser@npm:21.1.1"
- checksum: ed2d96a616a9e3e1cc7d204c62ecc61f7aaab633dcbfab2c6df50f7f87b393993fe6640d017759fe112d0cb1e0119f2b4150a87305cc873fd90831c6a58ccf1c
- languageName: node
- linkType: hard
-
-"yargs@npm:^17.3.1, yargs@npm:^17.5.1":
- version: 17.7.2
- resolution: "yargs@npm:17.7.2"
- dependencies:
- cliui: ^8.0.1
- escalade: ^3.1.1
- get-caller-file: ^2.0.5
- require-directory: ^2.1.1
- string-width: ^4.2.3
- y18n: ^5.0.5
- yargs-parser: ^21.1.1
- checksum: 73b572e863aa4a8cbef323dd911d79d193b772defd5a51aab0aca2d446655216f5002c42c5306033968193bdbf892a7a4c110b0d77954a7fdf563e653967b56a
- languageName: node
- linkType: hard
-
-"yocto-queue@npm:^0.1.0":
- version: 0.1.0
- resolution: "yocto-queue@npm:0.1.0"
- checksum: f77b3d8d00310def622123df93d4ee654fc6a0096182af8bd60679ddcdfb3474c56c6c7190817c84a2785648cdee9d721c0154eb45698c62176c322fb46fc700
- languageName: node
- linkType: hard
-
-"yocto-queue@npm:^1.0.0":
- version: 1.0.0
- resolution: "yocto-queue@npm:1.0.0"
- checksum: 2cac84540f65c64ccc1683c267edce396b26b1e931aa429660aefac8fbe0188167b7aee815a3c22fa59a28a58d898d1a2b1825048f834d8d629f4c2a5d443801
- languageName: node
- linkType: hard
-
-"zwitch@npm:^2.0.0, zwitch@npm:^2.0.4":
- version: 2.0.4
- resolution: "zwitch@npm:2.0.4"
- checksum: f22ec5fc2d5f02c423c93d35cdfa83573a3a3bd98c66b927c368ea4d0e7252a500df2a90a6b45522be536a96a73404393c958e945fdba95e6832c200791702b6
- languageName: node
- linkType: hard
diff --git a/gpt4all-training/old-README.md b/gpt4all-training/old-README.md
index 4a2f51dd..6117e122 100644
--- a/gpt4all-training/old-README.md
+++ b/gpt4all-training/old-README.md
@@ -8,14 +8,6 @@
:green_book: Technical Report 1: GPT4All
-
-:snake: Official Python Bindings
-
-
-
-:computer: Official Typescript Bindings
-
-
:speech_balloon: Official Web Chat Interface
@@ -74,8 +66,6 @@ Find the most up-to-date information on the [GPT4All Website](https://gpt4all.io
Note that this model is only compatible with the C++ bindings found [here](https://github.com/nomic-ai/gpt4all-chat). It will not work with any existing llama.cpp bindings, as we had to make a large fork of llama.cpp. GPT4All will support the ecosystem around this new C++ backend going forward.
-Python bindings are imminent and will be integrated into this [repository](https://github.com/nomic-ai/pyllamacpp). Stay tuned on the [GPT4All Discord](https://discord.gg/mGZE39AS3e) for updates.
-
## Training GPT4All-J
Please see [GPT4All-J Technical Report](https://static.nomic.ai/gpt4all/2023_GPT4All-J_Technical_Report_2.pdf) for details.
@@ -146,43 +136,6 @@ This model had all refusal to answer responses removed from training. Try it wit
-----------
Note: the full model on GPU (16GB of RAM required) performs much better in our qualitative evaluations.
-# Python Client
-## CPU Interface
-To run GPT4All in Python, see the new [official Python bindings](https://github.com/nomic-ai/pyllamacpp).
-
-The old bindings are still available but are now deprecated; they will not work in a notebook environment.
-To get started with the Python client on the CPU interface, first install the [nomic client](https://github.com/nomic-ai/nomic) with `pip install nomic`.
-Then, you can use the following script to interact with GPT4All:
-```
-from nomic.gpt4all import GPT4All
-m = GPT4All()
-m.open()
-m.prompt('write me a story about a lonely computer')
-```
-
-## GPU Interface
-There are two ways to get up and running with this model on GPU.
-The setup here is slightly more involved than for the CPU model.
-1. Clone the nomic client [repo](https://github.com/nomic-ai/nomic) and run `pip install .[GPT4All]` in the home directory.
-2. Run `pip install nomic` and install the additional dependencies from the wheels built [here](https://github.com/nomic-ai/nomic/tree/main/bin).
-
-Once this is done, you can run the model on GPU with a script like the following:
-```
-from nomic.gpt4all import GPT4AllGPU
-m = GPT4AllGPU(LLAMA_PATH)
-config = {'num_beams': 2,
- 'min_new_tokens': 10,
- 'max_length': 100,
- 'repetition_penalty': 2.0}
-out = m.generate('write me a story about a lonely computer', config)
-print(out)
-```
-Here, `LLAMA_PATH` is the path to a Hugging Face AutoModel-compliant LLaMA model.
-Nomic is unable to distribute this file at this time.
-We are working on a version of GPT4All that does not have this limitation.
-
-You can pass any of the [Hugging Face generation config params](https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig) in the config.
-
# GPT4All Compatibility Ecosystem
Edge models in the GPT4All ecosystem. Please open a PR as the [community grows](https://huggingface.co/models?sort=modified&search=4bit).
Feel free to convert this to a more structured table.
diff --git a/gpt4all-bindings/python/mkdocs.yml b/mkdocs.yml
similarity index 86%
rename from gpt4all-bindings/python/mkdocs.yml
rename to mkdocs.yml
index 651366a3..7280cfa1 100644
--- a/gpt4all-bindings/python/mkdocs.yml
+++ b/mkdocs.yml
@@ -22,10 +22,6 @@ nav:
- 'Local AI Chat with your OneDrive': 'gpt4all_desktop/cookbook/use-local-ai-models-to-privately-chat-with-One-Drive.md'
- 'API Server':
- 'gpt4all_api_server/home.md'
- - 'Python SDK':
- - 'gpt4all_python/home.md'
- - 'Monitoring': 'gpt4all_python/monitoring.md'
- - 'SDK Reference': 'gpt4all_python/ref.md'
- 'Help':
- 'FAQ': 'gpt4all_help/faq.md'
- 'Troubleshooting': 'gpt4all_help/troubleshooting.md'
@@ -73,14 +69,6 @@ extra_css:
plugins:
- search
- - mkdocstrings:
- handlers:
- python:
- options:
- show_root_heading: True
- heading_level: 4
- show_root_full_path: false
- docstring_section_style: list
- material/social:
cards_layout_options:
font_family: Roboto
diff --git a/requirements-docs.txt b/requirements-docs.txt
new file mode 100644
index 00000000..475cbcdc
--- /dev/null
+++ b/requirements-docs.txt
@@ -0,0 +1,3 @@
+markdown-captions
+mkdocs
+mkdocs-material[imaging]
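With the docs requirements now pinned at the repository root (and mkdocs.yml moved alongside them), a minimal local docs build might look like the sketch below. This is an illustrative assumption, not the project's documented workflow; the exact CI invocation may differ.

```
# Sketch only: build the docs locally from the repository root,
# assuming the relocated mkdocs.yml and the requirements file above.
pip install -r requirements-docs.txt   # markdown-captions, mkdocs, mkdocs-material[imaging]
mkdocs build                           # renders the static site into ./site/
mkdocs serve                           # optional: live preview at http://127.0.0.1:8000
```

The `[imaging]` extra pulls in the image-processing dependencies (Pillow and CairoSVG) that the `material/social` cards plugin configured in mkdocs.yml relies on.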