Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-04-28 11:45:23 +00:00)
* [fp8] refactor hook
* [fp8] support gemini plugin
* [example] add fp8 option for llama benchmark
24 lines | 521 B | Python
import torch.nn.functional as F

from colossalai.quantization.fp8 import linear_fp8
from colossalai.tensor.param_op_hook import ColoParamOpHook


class FP8Hook(ColoParamOpHook):
    """Parameter-op hook that routes F.linear through its FP8 implementation.

    The forward/backward hooks are intentionally no-ops; the only behavior
    lives in rewrite_op, which swaps the linear op for linear_fp8.
    """

    def pre_forward(self, params) -> None:
        pass

    def post_forward(self, params) -> None:
        pass

    def pre_backward(self, params) -> None:
        pass

    def post_backward(self, params) -> None:
        pass

    def rewrite_op(self, func):
        # Replace the standard linear op with the FP8 kernel; leave
        # every other op unchanged.
        if func is F.linear:
            return linear_fp8
        return func
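
For context, a minimal usage sketch (not part of the file above). It assumes that ColoParamOpHookManager.use_hooks from colossalai.tensor.param_op_hook is the context manager used to activate parameter-op hooks, and that model and inputs are hypothetical placeholders; inside the block, F.linear calls issued on managed parameters are rewritten to linear_fp8 via FP8Hook.rewrite_op.

from colossalai.tensor.param_op_hook import ColoParamOpHookManager

# Hypothetical model / inputs, for illustration only.
hook = FP8Hook()
with ColoParamOpHookManager.use_hooks(hook):
    output = model(inputs)  # linear ops inside this block run through linear_fp8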