kernel
danieldk committed
Commit c2ee5f1 · Parent(s): 6c5a23a

Make functions visible to make a custom `fused_moe`

Files changed (1): ext-torch/moe/__init__.py (+4 -1)
ext-torch/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 
 
@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",