refactor: clean up directory structure
commit 29c7ac7f73 (parent a514492d52)
gpt4all/__init__.py (new file, 0 lines)
gpt4all/eval/__init__.py (new file, 0 lines)
@@ -3,7 +3,7 @@ import torch
 import pickle
 import numpy as np
 from tqdm import tqdm
-from read import read_config
+from gpt4all.utils.read import read_config
 from argparse import ArgumentParser
 from peft import PeftModelForCausalLM
 from transformers import AutoModelForCausalLM, AutoTokenizer
gpt4all/inference/__init__.py (new file, 0 lines)
@@ -1,6 +1,6 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModelForCausalLM
-from read import read_config
+from gpt4all.utils.read import read_config
 from argparse import ArgumentParser
 import torch
 import time
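The imports in this inference hunk cover the usual recipe for running a PEFT-finetuned causal LM. As a rough, hedged sketch of how these pieces typically fit together (the checkpoint path, adapter path, and prompt below are placeholders, not values from this commit):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModelForCausalLM

base_model_id = "path/to/base-model"    # hypothetical base checkpoint
adapter_path = "path/to/lora-adapter"   # hypothetical adapter directory

tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16)
# Wrap the base model with the finetuned LoRA adapter weights.
model = PeftModelForCausalLM.from_pretrained(model, adapter_path)
model.eval()

inputs = tokenizer("The capital of France is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))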
@@ -2,9 +2,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 import torch.nn as nn
 from argparse import ArgumentParser
-from read import read_config
+from gpt4all.utils.read import read_config
 from accelerate.utils import set_seed
-from data import load_data_for_inference
+from gpt4all.utils.data import load_data_for_inference
 from tqdm import tqdm
 from datasets import Dataset
 import torch.distributed as dist
gpt4all/train/__init__.py (new file, 0 lines)
@@ -3,11 +3,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, get_scheduler, Lla
 import torch
 from torch.optim import AdamW
 from argparse import ArgumentParser
-from read import read_config
+from gpt4all.utils.read import read_config
 from accelerate import Accelerator
 from accelerate.utils import DummyScheduler, DummyOptim, set_seed
 from peft import get_peft_model, LoraConfig, TaskType
-from data import load_data
+from gpt4all.utils.data import load_data
 from torchmetrics import MeanMetric
 from tqdm import tqdm
 import wandb
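The training hunk pulls in peft's LoRA utilities alongside Accelerate, torchmetrics, and wandb. A minimal sketch of the LoraConfig / get_peft_model wiring, assuming illustrative hyperparameters and a small placeholder checkpoint rather than the values in this repo's train config:

from transformers import AutoModelForCausalLM
from peft import get_peft_model, LoraConfig, TaskType

# Illustrative LoRA hyperparameters (not taken from this commit's config files).
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
)

model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder checkpoint
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()  # only the LoRA adapter parameters remain trainable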
gpt4all/utils/__init__.py (new file, 0 lines)
@@ -1,6 +1,6 @@
 import glob
 import torch
-from datasets import load_dataset, concatenate_datasets
+from datasets import load_dataset
 import os
 from torch.utils.data import DataLoader
 from transformers import DefaultDataCollator
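Besides trimming concatenate_datasets, the utils hunk keeps the standard datasets-to-DataLoader pipeline. A small sketch of how those imports typically compose, assuming a hypothetical JSONL file with a "text" field and a placeholder tokenizer (neither comes from this repo):

from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DefaultDataCollator

tokenizer = AutoTokenizer.from_pretrained("gpt2")          # placeholder tokenizer
tokenizer.pad_token = tokenizer.eos_token                  # gpt2 has no pad token by default
dataset = load_dataset("json", data_files="train.jsonl", split="train")  # hypothetical file

def tokenize(example):
    return tokenizer(example["text"], truncation=True, max_length=512, padding="max_length")

tokenized = dataset.map(tokenize, remove_columns=dataset.column_names)
loader = DataLoader(tokenized, batch_size=8, collate_fn=DefaultDataCollator())
for batch in loader:
    pass  # each batch is a dict of input_ids / attention_mask tensors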
setup.py (new file, 34 lines)
@@ -0,0 +1,34 @@
+from setuptools import setup, find_packages
+
+with open('README.md', 'r', encoding='utf-8') as f:
+    long_description = f.read()
+
+with open('requirements.txt', 'r', encoding='utf-8') as f:
+    requirements = [line.strip() for line in f if line.strip()]
+
+setup(
+    name='gpt4all',
+    version='0.0.1',
+    author='nomic-ai',
+    author_email='zach@nomic-ai',
+    description='an ecosystem of open-source chatbots trained on a massive collections of clean assistant data including code, stories and dialogue',
+    long_description=long_description,
+    long_description_content_type='text/markdown',
+    url='https://github.com/nomic-ai/gpt4all',
+    packages=find_packages(),
+    install_requires=requirements,
+    classifiers=[
+        'Development Status :: 3 - Alpha',
+        'License :: OSI Approved :: MIT License',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: 3.9',
+        'Topic :: Text Processing :: Linguistic',
+        'Topic :: Scientific/Engineering :: Artificial Intelligence',
+        'Intended Audience :: Science/Research',
+        'Operating System :: OS Independent',
+    ],
+    python_requires='>=3.6',
+)
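With setup.py added, an editable install from the repository root makes the package-qualified import paths introduced in this commit resolvable from anywhere, for example:

# pip install -e .   (run from the repository root)
# afterwards the refactored modules import via the gpt4all package:
from gpt4all.utils.read import read_config
from gpt4all.utils.data import load_data, load_data_for_inference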