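"""Example: SmoothQuant int8 Llama inference with ColossalAI tensor and pipeline parallelism.

Loads a SmoothQuant-quantized Llama checkpoint, wraps it in CaiInferEngine, and runs a
short generation across tp_size * pp_size GPU processes.
"""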
import argparse

import torch
import torch.distributed as dist

import colossalai
from colossalai.inference import CaiInferEngine
from colossalai.inference.quant.smoothquant.models.llama import SmoothLlamaForCausalLM
from colossalai.logging import disable_existing_loggers
from colossalai.testing import spawn


@torch.no_grad()
def run_llama_inference(args):
    quantized_model_dir = args.quantized_path
    max_batch_size = args.max_batch_size
    max_input_len = args.max_input_len
    max_output_len = args.max_output_len
    micro_batch_size = args.micro_batch_size

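    # Build a fixed dummy prompt of hard-coded token ids, used only to exercise the engine.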
    def data_gen():
        input_ids = torch.tensor([[15496, 11, 616, 3290, 318, 13779, 318, 13779]], dtype=torch.int64)
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)
        return dict(input_ids=input_ids, attention_mask=attention_mask)

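    # Replicate the single prompt to a batch of 16 sequences and move the tensors to the GPU.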
    inputs = data_gen()
    for k, v in inputs.items():
        if torch.is_tensor(v) or "Tensor" in v.__class__.__name__:
            new_shape = [1] * v.dim()
            new_shape[0] = 16
            inputs[k] = v.to("cuda").repeat(*new_shape)

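    # Load the SmoothQuant-quantized Llama checkpoint and move it onto the current device.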
    model = SmoothLlamaForCausalLM.from_quantized(quantized_model_dir, model_basename="llama-7b")
    model = model.cuda()

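    # Wrap the model in the inference engine with tensor and pipeline parallelism enabled.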
    engine = CaiInferEngine(
        tp_size=args.tp_size,
        pp_size=args.pp_size,
        model=model,
        max_batch_size=max_batch_size,
        max_input_len=max_input_len,
        max_output_len=max_output_len,
        micro_batch_size=micro_batch_size,
        quant="smoothquant",
    )

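    # Run generation; only rank 0 checks the result.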
    output = engine.generate(inputs)
    if dist.get_rank() == 0:
        assert len(output[0]) == max_output_len, f"expected {max_output_len} generated tokens, got {len(output[0])}"


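# Per-process entry point: initialize the distributed backend, then run inference.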
def run_smoothquant_inference(rank, world_size, port, args):
    disable_existing_loggers()
    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_llama_inference(args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-q", "--quantized_path", type=str, help="Path to the quantized model checkpoint", required=True)
    parser.add_argument("--tp_size", type=int, default=2, help="Tensor parallel size")
    parser.add_argument("--pp_size", type=int, default=2, help="Pipeline parallel size")
    parser.add_argument("--max_batch_size", type=int, default=4, help="Maximum batch size")
    parser.add_argument("--micro_batch_size", type=int, default=4, help="Micro batch size")
    parser.add_argument("--max_input_len", type=int, default=32, help="Maximum input length")
    parser.add_argument("--max_output_len", type=int, default=32, help="Maximum output length")

    args = parser.parse_args()
    # Spawn one worker process per parallel rank (tp_size * pp_size in total).
    spawn(run_smoothquant_inference, args.tp_size * args.pp_size, args=args)
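# Example launch (illustrative; assumes 4 GPUs for the default tp_size=2, pp_size=2,
# and that this file is saved as smoothquant_llama.py):
#   python smoothquant_llama.py -q /path/to/quantized/llama-7b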