[Fix] Remove obsolete files - inference (#5650)

Yuanheng Zhao
2024-04-25 22:04:59 +08:00
committed by GitHub
parent a8fd3b0342
commit f342a93871
3 changed files with 0 additions and 301 deletions


@@ -1,59 +0,0 @@
import argparse
import os

import torch
from datasets import load_dataset
from transformers import LlamaTokenizer

from colossalai.inference.quant.smoothquant.models.llama import SmoothLlamaForCausalLM


def build_model_and_tokenizer(model_name):
    tokenizer = LlamaTokenizer.from_pretrained(model_name, model_max_length=512)
    kwargs = {"torch_dtype": torch.float16, "device_map": "sequential"}
    model = SmoothLlamaForCausalLM.from_pretrained(model_name, **kwargs)
    model = model.to(torch.float32)
    return model, tokenizer


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-name", type=str, help="model name")
    parser.add_argument(
        "--output-path",
        type=str,
        help="where to save the checkpoint",
    )
    parser.add_argument(
        "--dataset-path",
        type=str,
        help="location of the calibration dataset",
    )
    parser.add_argument("--num-samples", type=int, default=10)
    parser.add_argument("--seq-len", type=int, default=512)
    args = parser.parse_args()
    return args


@torch.no_grad()
def main():
    args = parse_args()
    model_path = args.model_name
    dataset_path = args.dataset_path
    output_path = args.output_path
    num_samples = args.num_samples
    seq_len = args.seq_len

    model, tokenizer = build_model_and_tokenizer(model_path)

    if not os.path.exists(dataset_path):
        raise FileNotFoundError(f"Cannot find the dataset at {args.dataset_path}")

    dataset = load_dataset("json", data_files=dataset_path, split="train")

    model.quantized(tokenizer, dataset, num_samples=num_samples, seq_len=seq_len)
    model = model.cuda()

    model.save_quantized(output_path, model_basename="llama-7b")


if __name__ == "__main__":
    main()


@@ -1,98 +0,0 @@
import argparse

import torch
import torch.distributed as dist
from transformers import LlamaForCausalLM, LlamaTokenizer

import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.inference import InferenceEngine
from colossalai.testing import spawn

INPUT_TEXTS = [
    "What is the longest river in the world?",
    "Explain the difference between process and thread in computer science.",
]


def run_inference(args):
    llama_model_path = args.model_path
    llama_tokenize_path = args.tokenizer_path or args.model_path

    max_input_len = args.max_input_len
    max_output_len = args.max_output_len
    max_batch_size = args.batch_size
    micro_batch_size = args.micro_batch_size
    tp_size = args.tp_size
    pp_size = args.pp_size
    rank = dist.get_rank()

    tokenizer = LlamaTokenizer.from_pretrained(llama_tokenize_path, padding_side="left")
    tokenizer.pad_token_id = tokenizer.eos_token_id

    if args.quant is None:
        model = LlamaForCausalLM.from_pretrained(llama_model_path, pad_token_id=tokenizer.pad_token_id)
    elif args.quant == "gptq":
        from auto_gptq import AutoGPTQForCausalLM

        model = AutoGPTQForCausalLM.from_quantized(
            llama_model_path, inject_fused_attention=False, device=torch.cuda.current_device()
        )
    elif args.quant == "smoothquant":
        from colossalai.inference.quant.smoothquant.models.llama import SmoothLlamaForCausalLM

        model = SmoothLlamaForCausalLM.from_quantized(llama_model_path, model_basename=args.smoothquant_base_name)
        model = model.cuda()

    engine = InferenceEngine(
        tp_size=tp_size,
        pp_size=pp_size,
        model=model,
        max_input_len=max_input_len,
        max_output_len=max_output_len,
        max_batch_size=max_batch_size,
        micro_batch_size=micro_batch_size,
        quant=args.quant,
        dtype=args.dtype,
    )

    inputs = tokenizer(INPUT_TEXTS, return_tensors="pt", padding="longest", max_length=max_input_len, truncation=True)
    inputs = {k: v.to(get_accelerator().get_current_device()) for k, v in inputs.items()}
    outputs = engine.generate(inputs)

    if rank == 0:
        output_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        for input_text, output_text in zip(INPUT_TEXTS, output_texts):
            print(f"Input: {input_text}")
            print(f"Output: {output_text}")


def run_tp_pipeline_inference(rank, world_size, port, args):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_inference(args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--model_path", type=str, help="Model path", required=True)
    parser.add_argument("-i", "--input", default="What is the longest river in the world?")
    parser.add_argument("-t", "--tokenizer_path", type=str, help="Tokenizer path", default=None)
    parser.add_argument(
        "-q",
        "--quant",
        type=str,
        choices=["gptq", "smoothquant"],
        default=None,
        help="quantization type: 'gptq' or 'smoothquant'",
    )
    parser.add_argument("--smoothquant_base_name", type=str, default=None, help="smoothquant base name")
    parser.add_argument("--tp_size", type=int, default=1, help="Tensor parallel size")
    parser.add_argument("--pp_size", type=int, default=1, help="Pipeline parallel size")
    parser.add_argument("-b", "--batch_size", type=int, default=4, help="Maximum batch size")
    parser.add_argument("--max_input_len", type=int, default=2048, help="Maximum input length")
    parser.add_argument("--max_output_len", type=int, default=64, help="Maximum output length")
    parser.add_argument("--micro_batch_size", type=int, default=1, help="Micro batch size")
    parser.add_argument("--dtype", default="fp16", type=str)
    args = parser.parse_args()

    spawn(run_tp_pipeline_inference, nprocs=args.tp_size * args.pp_size, args=args)