diff --git a/colossalai/kernel/triton/__init__.py b/colossalai/kernel/triton/__init__.py
index 5840ad291..75812db03 100644
--- a/colossalai/kernel/triton/__init__.py
+++ b/colossalai/kernel/triton/__init__.py
@@ -1,12 +1,20 @@
-from .context_attention import bloom_context_attn_fwd, llama_context_attn_fwd
-from .copy_kv_cache_dest import copy_kv_cache_to_dest
-from .fused_layernorm import layer_norm
-from .rms_norm import rmsnorm_forward
-from .rotary_embedding_kernel import rotary_embedding_fwd
-from .softmax import softmax
-from .token_attention_kernel import token_attention_fwd
+try:
+    import triton
+    HAS_TRITON = True
 
-__all__ = [
-    "llama_context_attn_fwd", "bloom_context_attn_fwd", "softmax", "layer_norm", "rmsnorm_forward",
-    "copy_kv_cache_to_dest", "rotary_embedding_fwd", "token_attention_fwd"
-]
+    from .context_attention import bloom_context_attn_fwd, llama_context_attn_fwd
+    from .copy_kv_cache_dest import copy_kv_cache_to_dest
+    from .fused_layernorm import layer_norm
+    from .rms_norm import rmsnorm_forward
+    from .rotary_embedding_kernel import rotary_embedding_fwd
+    from .softmax import softmax
+    from .token_attention_kernel import token_attention_fwd
+
+    __all__ = [
+        "llama_context_attn_fwd", "bloom_context_attn_fwd", "softmax", "layer_norm", "rmsnorm_forward",
+        "copy_kv_cache_to_dest", "rotary_embedding_fwd", "token_attention_fwd"
+    ]
+
+except ImportError:
+    HAS_TRITON = False
+    print("Triton is not installed. Please install Triton to use Triton kernels.")
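
Below is a minimal sketch of how a caller might consume the HAS_TRITON flag this change introduces; the PyTorch fallback and the assumed rmsnorm_forward(x, weight, eps) signature are illustrative assumptions, not part of this PR.

    import torch
    from colossalai.kernel.triton import HAS_TRITON

    if HAS_TRITON:
        # Triton is available, so the kernel names are exported by the package.
        from colossalai.kernel.triton import rmsnorm_forward
    else:
        # Hypothetical fallback: plain PyTorch RMSNorm used when Triton is absent.
        def rmsnorm_forward(x, weight, eps):
            variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
            x = x * torch.rsqrt(variance + eps)
            return weight * x.to(weight.dtype)

Guarding the imports this way lets downstream code degrade gracefully instead of failing at import time when Triton is not installed.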