mirror of https://github.com/hpcaitech/ColossalAI.git
[fp8] support gemini plugin (#5978)
* [fp8] refactor hook
* [fp8] support gemini plugin
* [example] add fp8 option for llama benchmark
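A minimal usage sketch of what the commit message describes: enabling FP8 linear layers when training through the Gemini plugin. The `use_fp8` flag name and its wiring into GeminiPlugin are assumptions inferred from the commit message, not confirmed by the diff below.

import torch.nn as nn

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam

colossalai.launch_from_torch()

model = nn.Sequential(nn.Linear(1024, 1024), nn.GELU(), nn.Linear(1024, 1024))
optimizer = HybridAdam(model.parameters(), lr=1e-3)

# Assumption: the PR exposes FP8 matmuls through a use_fp8-style flag on
# GeminiPlugin, which installs FP8Hook on the wrapped model's parameter ops.
plugin = GeminiPlugin(use_fp8=True)
booster = Booster(plugin=plugin)
model, optimizer, *_ = booster.boost(model, optimizer)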
@@ -4,8 +4,8 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 from colossalai.accelerator import get_accelerator
-from colossalai.booster.plugin.fp8_hook import FP8Hook
 from colossalai.quantization.fp8 import linear_fp8
+from colossalai.quantization.fp8_hook import FP8Hook
 from colossalai.tensor.colo_parameter import ColoParameter
 from colossalai.tensor.param_op_hook import ColoParamOpHookManager
 from colossalai.utils import get_current_device
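For context on the relocated import: FP8Hook is a parameter-op hook that swaps eligible kernels for their FP8 counterparts while Gemini-managed ColoParameters are in use. Below is a sketch consistent with the imports in this hunk; the exact ColoParamOpHook interface (the no-op lifecycle callbacks and the rewrite_op override point) is an assumption.

import torch.nn.functional as F

from colossalai.quantization.fp8 import linear_fp8
from colossalai.tensor.param_op_hook import ColoParamOpHook


class FP8Hook(ColoParamOpHook):
    # Lifecycle callbacks are no-ops: this hook only rewrites kernels.
    def pre_forward(self, params) -> None:
        pass

    def post_forward(self, params) -> None:
        pass

    def pre_backward(self, params) -> None:
        pass

    def post_backward(self, params) -> None:
        pass

    def rewrite_op(self, func):
        # Dispatch dense linear calls to the FP8 kernel imported above.
        if func is F.linear:
            return linear_fp8
        return func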