kernel
danieldk HF Staff committed on
Commit
0de185e
·
1 Parent(s): 3d21f81
Files changed (40)
  1. build/torch25-cxx11-cu118-x86_64-linux/moe/__init__.py +4 -1
  2. build/torch25-cxx11-cu118-x86_64-linux/moe/{_moe_z6j3gzsycn542.abi3.so → _moe_kl62rbmhz23l2.abi3.so} +2 -2
  3. build/torch25-cxx11-cu118-x86_64-linux/moe/_ops.py +3 -3
  4. build/torch25-cxx11-cu121-x86_64-linux/moe/__init__.py +4 -1
  5. build/torch25-cxx11-cu121-x86_64-linux/moe/{_moe_tuji4gj3mmhfo.abi3.so → _moe_ukxxchrl67era.abi3.so} +2 -2
  6. build/torch25-cxx11-cu121-x86_64-linux/moe/_ops.py +3 -3
  7. build/torch25-cxx11-cu124-x86_64-linux/moe/__init__.py +4 -1
  8. build/torch25-cxx11-cu124-x86_64-linux/moe/{_moe_pss5doo675cd4.abi3.so → _moe_dffrrncuzubq2.abi3.so} +2 -2
  9. build/torch25-cxx11-cu124-x86_64-linux/moe/_ops.py +3 -3
  10. build/torch25-cxx98-cu118-x86_64-linux/moe/__init__.py +4 -1
  11. build/torch25-cxx98-cu118-x86_64-linux/moe/{_moe_5uyw6qhdybj5e.abi3.so → _moe_2xnyvi7hnumao.abi3.so} +2 -2
  12. build/torch25-cxx98-cu118-x86_64-linux/moe/_ops.py +3 -3
  13. build/torch25-cxx98-cu121-x86_64-linux/moe/__init__.py +4 -1
  14. build/torch25-cxx98-cu121-x86_64-linux/moe/{_moe_tj3osoay2niyk.abi3.so → _moe_tirshgrhi2pey.abi3.so} +1 -1
  15. build/torch25-cxx98-cu121-x86_64-linux/moe/_ops.py +3 -3
  16. build/torch25-cxx98-cu124-x86_64-linux/moe/__init__.py +4 -1
  17. build/torch25-cxx98-cu124-x86_64-linux/moe/{_moe_phlujktdbqekw.abi3.so → _moe_4kko7ic535qtk.abi3.so} +1 -1
  18. build/torch25-cxx98-cu124-x86_64-linux/moe/_ops.py +3 -3
  19. build/torch26-cxx11-cu118-x86_64-linux/moe/__init__.py +4 -1
  20. build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_5eayygfagmmhg.abi3.so +3 -0
  21. build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_zlz7rpd2goyn2.abi3.so +0 -3
  22. build/torch26-cxx11-cu118-x86_64-linux/moe/_ops.py +3 -3
  23. build/torch26-cxx11-cu124-x86_64-linux/moe/__init__.py +4 -1
  24. build/torch26-cxx11-cu124-x86_64-linux/moe/{_moe_wua27hyvpwmli.abi3.so → _moe_zg2mvvtxox7bw.abi3.so} +1 -1
  25. build/torch26-cxx11-cu124-x86_64-linux/moe/_ops.py +3 -3
  26. build/torch26-cxx11-cu126-x86_64-linux/moe/__init__.py +4 -1
  27. build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_3z4bgea4nke26.abi3.so +0 -3
  28. build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_qkqm4vo3r7uoa.abi3.so +3 -0
  29. build/torch26-cxx11-cu126-x86_64-linux/moe/_ops.py +3 -3
  30. build/torch26-cxx98-cu118-x86_64-linux/moe/__init__.py +4 -1
  31. build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_ecknt47nyrfxy.abi3.so +0 -3
  32. build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_puz6dxkmbuvo4.abi3.so +3 -0
  33. build/torch26-cxx98-cu118-x86_64-linux/moe/_ops.py +3 -3
  34. build/torch26-cxx98-cu124-x86_64-linux/moe/__init__.py +4 -1
  35. build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_c5q6noq7u34gy.abi3.so +3 -0
  36. build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_zirytomtyvq4i.abi3.so +0 -3
  37. build/torch26-cxx98-cu124-x86_64-linux/moe/_ops.py +3 -3
  38. build/torch26-cxx98-cu126-x86_64-linux/moe/__init__.py +4 -1
  39. build/torch26-cxx98-cu126-x86_64-linux/moe/{_moe_cvfkca6s5srfc.abi3.so → _moe_mywzynz4p75mc.abi3.so} +1 -1
  40. build/torch26-cxx98-cu126-x86_64-linux/moe/_ops.py +3 -3
build/torch25-cxx11-cu118-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch25-cxx11-cu118-x86_64-linux/moe/{_moe_z6j3gzsycn542.abi3.so → _moe_kl62rbmhz23l2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9664c7b8a4e935582354443bebc5557041cac1d35b4b483abe73b4559d7c468c
-size 85827696
+oid sha256:f190112e8a1b6db549e5dc09ac9be85b5ab2bb3ae24cab57473dcec8ac102988
+size 85823632
build/torch25-cxx11-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_z6j3gzsycn542
-ops = torch.ops._moe_z6j3gzsycn542
+from . import _moe_kl62rbmhz23l2
+ops = torch.ops._moe_kl62rbmhz23l2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_z6j3gzsycn542::{op_name}"
+    return f"_moe_kl62rbmhz23l2::{op_name}"
build/torch25-cxx11-cu121-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch25-cxx11-cu121-x86_64-linux/moe/{_moe_tuji4gj3mmhfo.abi3.so → _moe_ukxxchrl67era.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7848d33b838158269ee403fbd068b92fae716bfc27a22f393935247b9ad58848
-size 86034528
+oid sha256:d5d706109843d1aa69a5489e10c61cd8c9d15a73cf94a739f47646a5ba1a3ef7
+size 86030432
build/torch25-cxx11-cu121-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_tuji4gj3mmhfo
-ops = torch.ops._moe_tuji4gj3mmhfo
+from . import _moe_ukxxchrl67era
+ops = torch.ops._moe_ukxxchrl67era
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_tuji4gj3mmhfo::{op_name}"
+    return f"_moe_ukxxchrl67era::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch25-cxx11-cu124-x86_64-linux/moe/{_moe_pss5doo675cd4.abi3.so → _moe_dffrrncuzubq2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:041c922d7e435dbc7ca974c331455f02ed43ecd4adcd859dd8ee593cfea676e3
-size 85733000
+oid sha256:3d8d67e31ca5e7c836855825acc80256c8e05ee70eec3e6c17f1425f3f783d03
+size 85737096
build/torch25-cxx11-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_pss5doo675cd4
-ops = torch.ops._moe_pss5doo675cd4
+from . import _moe_dffrrncuzubq2
+ops = torch.ops._moe_dffrrncuzubq2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_pss5doo675cd4::{op_name}"
+    return f"_moe_dffrrncuzubq2::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch25-cxx98-cu118-x86_64-linux/moe/{_moe_5uyw6qhdybj5e.abi3.so → _moe_2xnyvi7hnumao.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acfcb8be6199c8e08519a1db8ec8122f7ec69a96c798d9c26e681469ba326782
-size 85815472
+oid sha256:a1af84d29b9948f4475cc37076aab7d7ffdfae86e16ea32cce35fb49951bf9fe
+size 85823688
build/torch25-cxx98-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_5uyw6qhdybj5e
-ops = torch.ops._moe_5uyw6qhdybj5e
+from . import _moe_2xnyvi7hnumao
+ops = torch.ops._moe_2xnyvi7hnumao
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_5uyw6qhdybj5e::{op_name}"
+    return f"_moe_2xnyvi7hnumao::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch25-cxx98-cu121-x86_64-linux/moe/{_moe_tj3osoay2niyk.abi3.so → _moe_tirshgrhi2pey.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:55b0eed6d5e4f8ef44d2f5baea4466cc633ae561aefd48dc54d648b9dc4742f3
+oid sha256:93b5b0b230ca12fc319505c6c887b6dfad85ce8f8c949bf9e4691e0884723844
 size 86026776
build/torch25-cxx98-cu121-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_tj3osoay2niyk
-ops = torch.ops._moe_tj3osoay2niyk
+from . import _moe_tirshgrhi2pey
+ops = torch.ops._moe_tirshgrhi2pey
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_tj3osoay2niyk::{op_name}"
+    return f"_moe_tirshgrhi2pey::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch25-cxx98-cu124-x86_64-linux/moe/{_moe_phlujktdbqekw.abi3.so → _moe_4kko7ic535qtk.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c3b1cc57c3f73b7c43aec3aa6c0673bc8e24827a0338ef8beeb431392e9ac3e
+oid sha256:90db7c2ac8d9f185897842cdaea41cd7c71943c43bca86f269a4f47711c05701
 size 85733416
build/torch25-cxx98-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_phlujktdbqekw
-ops = torch.ops._moe_phlujktdbqekw
+from . import _moe_4kko7ic535qtk
+ops = torch.ops._moe_4kko7ic535qtk
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_phlujktdbqekw::{op_name}"
+    return f"_moe_4kko7ic535qtk::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_5eayygfagmmhg.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:158cb70d37cd0daa901b4ef2c4b8120af58dd377b50705f9c512e1fb67a80e8a
+size 85827904
build/torch26-cxx11-cu118-x86_64-linux/moe/_moe_zlz7rpd2goyn2.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:658fb6f129cf6ba0ea172ccfd1f115c0a03e5574122456ab9ecd35122908369a
-size 85823776
build/torch26-cxx11-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_zlz7rpd2goyn2
-ops = torch.ops._moe_zlz7rpd2goyn2
+from . import _moe_5eayygfagmmhg
+ops = torch.ops._moe_5eayygfagmmhg
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_zlz7rpd2goyn2::{op_name}"
+    return f"_moe_5eayygfagmmhg::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch26-cxx11-cu124-x86_64-linux/moe/{_moe_wua27hyvpwmli.abi3.so → _moe_zg2mvvtxox7bw.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3f7f1fa2f76004fba0e0d4eb8cbc3e35a7182538c83261f4a01a8e7401bfa81
+oid sha256:3237636d231ffb136ce06285b22861e2824bf0930bdb3193decb72f1cce7f562
 size 85737400
build/torch26-cxx11-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_wua27hyvpwmli
-ops = torch.ops._moe_wua27hyvpwmli
+from . import _moe_zg2mvvtxox7bw
+ops = torch.ops._moe_zg2mvvtxox7bw
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_wua27hyvpwmli::{op_name}"
+    return f"_moe_zg2mvvtxox7bw::{op_name}"
build/torch26-cxx11-cu126-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_3z4bgea4nke26.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b1c7676c910bb702d77a6dbf1653d9f17876924502bbbfc6661b85b8eaa0969d
-size 86192320
build/torch26-cxx11-cu126-x86_64-linux/moe/_moe_qkqm4vo3r7uoa.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6076f407d8996d1646f0fcb0b87d1866b2a3cc7bf0d558f3b1ad6c13d24ac08
+size 86192352
build/torch26-cxx11-cu126-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_3z4bgea4nke26
-ops = torch.ops._moe_3z4bgea4nke26
+from . import _moe_qkqm4vo3r7uoa
+ops = torch.ops._moe_qkqm4vo3r7uoa
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_3z4bgea4nke26::{op_name}"
+    return f"_moe_qkqm4vo3r7uoa::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_ecknt47nyrfxy.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2f81a1d74110cc6f4c75d299b0bfa42b4789fd658d167d78c8786c0e10b08d1e
-size 85820040
build/torch26-cxx98-cu118-x86_64-linux/moe/_moe_puz6dxkmbuvo4.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffaedfdc0307bfb3b744b06d6c1785f37768b34febeb2dd7885e7dc19bf535a0
+size 85819984
build/torch26-cxx98-cu118-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_ecknt47nyrfxy
-ops = torch.ops._moe_ecknt47nyrfxy
+from . import _moe_puz6dxkmbuvo4
+ops = torch.ops._moe_puz6dxkmbuvo4
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_ecknt47nyrfxy::{op_name}"
+    return f"_moe_puz6dxkmbuvo4::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_c5q6noq7u34gy.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:311a7ef7e1366c784dc79a172b3e840086dd7768e202e943b7b69e1b8f54b723
+size 85729768
build/torch26-cxx98-cu124-x86_64-linux/moe/_moe_zirytomtyvq4i.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:333baa2d499c511f0f2f2b81a21eee2e21e8bed8f45311d222da690a59b7ad4e
-size 85725672
build/torch26-cxx98-cu124-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_zirytomtyvq4i
-ops = torch.ops._moe_zirytomtyvq4i
+from . import _moe_c5q6noq7u34gy
+ops = torch.ops._moe_c5q6noq7u34gy
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_zirytomtyvq4i::{op_name}"
+    return f"_moe_c5q6noq7u34gy::{op_name}"
build/torch26-cxx98-cu126-x86_64-linux/moe/__init__.py CHANGED
@@ -2,7 +2,7 @@ import torch
 
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
-from .fused_moe import fused_moe, fused_topk, grouped_topk
+from .fused_moe import fused_experts, fused_moe, fused_topk, grouped_topk
 from .scalar_type import ScalarType, scalar_types
 

@@ -80,7 +80,10 @@ def topk_softmax(
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",
+    "fused_experts",
     "fused_marlin_moe",
+    "fused_topk",
+    "grouped_topk",
     "moe_sum",
     "moe_align_block_size",
     "topk_softmax",
build/torch26-cxx98-cu126-x86_64-linux/moe/{_moe_cvfkca6s5srfc.abi3.so → _moe_mywzynz4p75mc.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:169328fb13eb33abdbde9044fba8e9bf958041ca5217ce1f6dee29a5eca62dff
+oid sha256:c574e0ef4c59e930712fcd539927afbe8e115a0b81c5beba6cc9fd727537b8a1
 size 86184688
build/torch26-cxx98-cu126-x86_64-linux/moe/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _moe_cvfkca6s5srfc
-ops = torch.ops._moe_cvfkca6s5srfc
+from . import _moe_mywzynz4p75mc
+ops = torch.ops._moe_mywzynz4p75mc
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_moe_cvfkca6s5srfc::{op_name}"
+    return f"_moe_mywzynz4p75mc::{op_name}"