fix typo under extensions/ (#5330)
parent febed23288
commit 6a3086a505
@@ -46,12 +46,12 @@ kernel = CPUAdamLoader().load()
 
 - Case 2: Load a specific kernel
 
-This case applies if you are familar with the extensions available.
+This case applies if you are familiar with the extensions available.
 
 ```python
 from colossalai.kernel.kernel_loader import CPUAdamLoader
 
-# load the kernel by giving the kernal name
+# load the kernel by giving the kernel name
 kernel = CPUAdamLoader().load(ext_name="cpu_adam_arm")
 ```
 
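For context, the hunk header above carries the README's default loading call (`kernel = CPUAdamLoader().load()`), while the changed snippet names an extension explicitly. A minimal sketch combining the two modes; the fallback on failure is an illustrative pattern assumed here, not documented loader behavior:

```python
from colossalai.kernel.kernel_loader import CPUAdamLoader

# Case 1 (the context line in the hunk header): let the loader pick
# the best-fit kernel for the current hardware.
kernel = CPUAdamLoader().load()

# Case 2 (this hunk): request a kernel by its extension name.
try:
    kernel = CPUAdamLoader().load(ext_name="cpu_adam_arm")
except Exception:
    # Assumption: fall back to auto-selection if the named kernel
    # cannot be built or loaded on this machine.
    kernel = CPUAdamLoader().load()
```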
@@ -20,7 +20,7 @@ class _CudaExtension(_CppExtension):
         """
 
     def is_hardware_available(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
 
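The hunk cuts off right after `import torch`. A sketch of a plausible completion of the check the comment describes, assuming the method simply reports `torch.cuda.is_available()` and treats a failed import as unavailable; the actual body continues outside this diff:

```python
def is_hardware_available(self) -> bool:
    # cuda extension can only be built if cuda is available
    try:
        import torch  # deferred import: torch may be absent at build time

        return torch.cuda.is_available()
    except ImportError:
        # Assumption: no torch means no CUDA toolchain to build against.
        return False
```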
@@ -6,7 +6,7 @@ class FlashAttentionDaoCudaExtension(_Extension):
         super().__init__(name="flash_attention_dao_cuda", support_aot=False, support_jit=False, priority=10)
 
     def is_hardware_available(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
 
@@ -6,7 +6,7 @@ class FlashAttentionXformersCudaExtension(_Extension):
         super().__init__(name="flash_attention_xformers_cuda", support_aot=False, support_jit=False)
 
     def is_hardware_available(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
 
@@ -8,7 +8,7 @@ class _TritonExtension(_Extension):
         super().__init__(name, support_aot=False, support_jit=True, priority=priority)
 
     def is_hardware_compatible(self) -> bool:
-        # cuda extension can only be built if cuda is availabe
+        # cuda extension can only be built if cuda is available
         try:
             import torch
 
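The `_TritonExtension` constructor in this hunk pins `support_aot=False, support_jit=True` and forwards a caller-supplied `priority`, so concrete Triton kernels only need to pass a name and a priority. A hypothetical subclass for illustration; the class name, kernel name, priority value, and import path are all invented, not part of this commit:

```python
from extensions.triton_extension import _TritonExtension  # import path assumed


class MyTritonExtension(_TritonExtension):
    # Hypothetical kernel wrapper: JIT-only, as fixed by the base class.
    def __init__(self):
        # Assumption: a higher priority makes a loader prefer this
        # implementation over lower-priority candidates.
        super().__init__(name="my_triton_kernel", priority=5)
```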