mirror of
https://github.com/hpcaitech/ColossalAI.git
synced 2025-09-05 02:51:59 +00:00
Add benchmarks for SFT, DPO, SimPO, and ORPO; add benchmarking results; support LoRA with gradient checkpointing.
This commit is contained in:
21
applications/ColossalChat/benchmarks/dummy_dataset.py
Normal file
21
applications/ColossalChat/benchmarks/dummy_dataset.py
Normal file
@@ -0,0 +1,21 @@
|
||||
import torch
|
||||
from torch.utils.data import Dataset, DataLoader
|
||||
|
||||
class DummyLLMDataset(Dataset):
    """Synthetic map-style dataset for LLM benchmarking.

    Every sample is the same dict: one all-ones ``torch.long`` tensor of
    length ``seq_len`` per requested key. Useful for measuring training
    throughput without real data loading.
    """

    def __init__(self, keys, seq_len, size=500):
        """
        Args:
            keys: iterable of field names each sample dict should contain
                (e.g. ``["input_ids", "attention_mask"]``).
            seq_len: length of the dummy 1-D tensor stored under each key.
            size: reported dataset length (number of samples).
        """
        self.keys = keys
        self.seq_len = seq_len
        self.data = self._generate_data()
        self.size = size

    def _generate_data(self):
        # One shared tensor per key — __getitem__ hands out these same
        # objects for every index, which is fine for a throughput benchmark.
        return {key: torch.ones(self.seq_len, dtype=torch.long) for key in self.keys}

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        # idx is deliberately ignored: all samples are identical.
        return {key: self.data[key] for key in self.keys}
|
Reference in New Issue
Block a user