refactor: clean up directory structure

commit 29c7ac7f73 (parent a514492d52)
Author: zanussbaum
Date:   2023-04-19 18:12:03 -04:00

13 changed files with 41 additions and 7 deletions
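
This refactor turns the flat training and eval scripts into a gpt4all package: helpers previously imported from top-level read and data modules are now imported as gpt4all.utils.read and gpt4all.utils.data, and empty __init__.py files plus a new setup.py make the directory installable. A minimal sketch of the new import style, assuming the package is on the path (the config file name below is a hypothetical example, not taken from this diff):

    # Package-style imports introduced by this refactor
    from gpt4all.utils.read import read_config
    from gpt4all.utils.data import load_data

    # read_config is expected to load a training/eval config from disk;
    # the path here is hypothetical
    config = read_config("configs/finetune.yaml")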

gpt4all/__init__.py (new empty file)

gpt4all/eval/__init__.py (new empty file)

@@ -3,7 +3,7 @@ import torch
 import pickle
 import numpy as np
 from tqdm import tqdm
-from read import read_config
+from gpt4all.utils.read import read_config
 from argparse import ArgumentParser
 from peft import PeftModelForCausalLM
 from transformers import AutoModelForCausalLM, AutoTokenizer


@@ -1,6 +1,6 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModelForCausalLM
-from read import read_config
+from gpt4all.utils.read import read_config
 from argparse import ArgumentParser
 import torch
 import time


@@ -2,9 +2,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 import torch.nn as nn
 from argparse import ArgumentParser
-from read import read_config
+from gpt4all.utils.read import read_config
 from accelerate.utils import set_seed
-from data import load_data_for_inference
+from gpt4all.utils.data import load_data_for_inference
 from tqdm import tqdm
 from datasets import Dataset
 import torch.distributed as dist


@@ -3,11 +3,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, get_scheduler, Lla
 import torch
 from torch.optim import AdamW
 from argparse import ArgumentParser
-from read import read_config
+from gpt4all.utils.read import read_config
 from accelerate import Accelerator
 from accelerate.utils import DummyScheduler, DummyOptim, set_seed
 from peft import get_peft_model, LoraConfig, TaskType
-from data import load_data
+from gpt4all.utils.data import load_data
 from torchmetrics import MeanMetric
 from tqdm import tqdm
 import wandb


@@ -1,6 +1,6 @@
 import glob
 import torch
-from datasets import load_dataset, concatenate_datasets
+from datasets import load_dataset
 import os
 from torch.utils.data import DataLoader
 from transformers import DefaultDataCollator

setup.py (new file, 34 lines)

@@ -0,0 +1,34 @@
from setuptools import setup, find_packages

with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()

with open('requirements.txt', 'r', encoding='utf-8') as f:
    requirements = [line.strip() for line in f if line.strip()]

setup(
    name='gpt4all',
    version='0.0.1',
    author='nomic-ai',
    author_email='zach@nomic-ai',
    description='an ecosystem of open-source chatbots trained on a massive collections of clean assistant data including code, stories and dialogue',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/nomic-ai/gpt4all',
    packages=find_packages(),
    install_requires=requirements,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.6',
)
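
With setup.py in place, the repository can be installed as a regular Python package, so the gpt4all.* imports above resolve from any working directory. A minimal sketch of the assumed local workflow (the editable-install command is standard setuptools practice, not something stated in this commit):

    # Assumed development install, run from the repository root:
    #   pip install -e .
    # Afterwards the package and its subpackages import cleanly:
    import gpt4all
    from gpt4all.utils.read import read_config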