Build
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
- build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py +0 -47
- build/{torch25-cxx11-cu118-x86_64-linux/moe/_moe_nskz7v224zllw.abi3.so → torch24-cxx11-cu118-x86_64-linux/moe/_moe_w3lspmuramohg.abi3.so} +1 -1
- build/torch24-cxx11-cu118-x86_64-linux/moe/_ops.py +3 -3
- build/torch24-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch24-cxx11-cu118-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch24-cxx11-cu121-x86_64-linux/moe/__init__.py +0 -47
- build/{torch25-cxx11-cu121-x86_64-linux/moe/_moe_t32bhzwhzero6.abi3.so → torch24-cxx11-cu121-x86_64-linux/moe/_moe_xztwj3vfii47s.abi3.so} +1 -1
- build/torch24-cxx11-cu121-x86_64-linux/moe/_ops.py +3 -3
- build/torch24-cxx11-cu121-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch24-cxx11-cu121-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch24-cxx11-cu124-x86_64-linux/moe/__init__.py +0 -47
- build/{torch25-cxx11-cu124-x86_64-linux/moe/_moe_pgljmg5ek5k4e.abi3.so → torch24-cxx11-cu124-x86_64-linux/moe/_moe_zjfwjryvbxcss.abi3.so} +1 -1
- build/torch24-cxx11-cu124-x86_64-linux/moe/_ops.py +3 -3
- build/torch24-cxx11-cu124-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch24-cxx11-cu124-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch24-cxx98-cu118-x86_64-linux/moe/__init__.py +0 -47
- build/{torch24-cxx11-cu118-x86_64-linux/moe/_moe_wtjc356yopxde.abi3.so → torch24-cxx98-cu118-x86_64-linux/moe/_moe_vjujc4o4hplak.abi3.so} +2 -2
- build/torch24-cxx98-cu118-x86_64-linux/moe/_ops.py +3 -3
- build/torch24-cxx98-cu118-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch24-cxx98-cu118-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch24-cxx98-cu121-x86_64-linux/moe/__init__.py +0 -47
- build/{torch25-cxx98-cu121-x86_64-linux/moe/_moe_plblvprmwqffy.abi3.so → torch24-cxx98-cu121-x86_64-linux/moe/_moe_bjua6v5mj6njy.abi3.so} +1 -1
- build/torch24-cxx98-cu121-x86_64-linux/moe/_moe_hrq7opevcb4ug.abi3.so +0 -3
- build/torch24-cxx98-cu121-x86_64-linux/moe/_ops.py +3 -3
- build/torch24-cxx98-cu121-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch24-cxx98-cu121-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch24-cxx98-cu124-x86_64-linux/moe/__init__.py +0 -47
- build/{torch25-cxx98-cu124-x86_64-linux/moe/_moe_k6bmwmtgkqymw.abi3.so → torch24-cxx98-cu124-x86_64-linux/moe/_moe_ajhcvhc2njy6q.abi3.so} +1 -1
- build/torch24-cxx98-cu124-x86_64-linux/moe/_ops.py +3 -3
- build/torch24-cxx98-cu124-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch24-cxx98-cu124-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch25-cxx11-cu118-x86_64-linux/moe/__init__.py +0 -47
- build/{torch24-cxx11-cu121-x86_64-linux/moe/_moe_fidhfyl4jgbje.abi3.so → torch25-cxx11-cu118-x86_64-linux/moe/_moe_wbafjrt24mw7y.abi3.so} +2 -2
- build/torch25-cxx11-cu118-x86_64-linux/moe/_ops.py +3 -3
- build/torch25-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch25-cxx11-cu118-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch25-cxx11-cu121-x86_64-linux/moe/__init__.py +0 -47
- build/{torch24-cxx98-cu118-x86_64-linux/moe/_moe_v3wdnwni3a5ce.abi3.so → torch25-cxx11-cu121-x86_64-linux/moe/_moe_ezuwtpw27xv6u.abi3.so} +2 -2
- build/torch25-cxx11-cu121-x86_64-linux/moe/_ops.py +3 -3
- build/torch25-cxx11-cu121-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch25-cxx11-cu121-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch25-cxx11-cu124-x86_64-linux/moe/__init__.py +0 -47
- build/{torch24-cxx11-cu124-x86_64-linux/moe/_moe_sg5gu4g3brle6.abi3.so → torch25-cxx11-cu124-x86_64-linux/moe/_moe_b3lelvb3xhtk2.abi3.so} +1 -1
- build/torch25-cxx11-cu124-x86_64-linux/moe/_ops.py +3 -3
- build/torch25-cxx11-cu124-x86_64-linux/moe/fused_marlin_moe.py +46 -6
- build/torch25-cxx11-cu124-x86_64-linux/moe/fused_moe.py +2 -2
- build/torch25-cxx98-cu118-x86_64-linux/moe/__init__.py +0 -47
- build/torch25-cxx98-cu118-x86_64-linux/moe/_moe_dtibz76vuxaaq.abi3.so +0 -3
- build/torch25-cxx98-cu118-x86_64-linux/moe/_moe_mqt4gjnisx6je.abi3.so +3 -0
- build/torch25-cxx98-cu118-x86_64-linux/moe/_ops.py +3 -3
build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py
CHANGED
@@ -1,19 +1,5 @@
-from typing import TYPE_CHECKING
-
 import torch
 
-# neuron has torch version that doesn't even have impl_abstract
-if TYPE_CHECKING:
-
-    def register_fake(fn):
-        return lambda name: fn
-
-else:
-    try:
-        from torch.library import register_fake
-    except ImportError:
-        from torch.library import impl_abstract as register_fake
-
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
 from .fused_moe import fused_moe, fused_topk, grouped_topk
@@ -91,39 +77,6 @@ def topk_softmax(
     ops.topk_softmax(topk_weights, topk_ids, token_expert_indicies, gating_output)
 
 
-if hasattr(ops, "marlin_gemm_moe"):
-
-    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
-    def marlin_gemm_moe_fake(
-        a: torch.Tensor,
-        b_q_weights: torch.Tensor,
-        sorted_ids: torch.Tensor,
-        topk_weights: torch.Tensor,
-        topk_ids: torch.Tensor,
-        b_scales: torch.Tensor,
-        b_zero_points: torch.Tensor,
-        g_idx: torch.Tensor,
-        perm: torch.Tensor,
-        workspace: torch.Tensor,
-        b_q_type: ScalarType,
-        size_m: torch.SymInt,
-        size_n: torch.SymInt,
-        size_k: torch.SymInt,
-        is_k_full: bool,
-        num_experts: int,
-        topk: int,
-        moe_block_size: int,
-        replicate_input: bool,
-        apply_weights: bool,
-    ) -> torch.Tensor:
-        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
 __all__ = [
     "gptq_marlin_moe_repack",
    "awq_marlin_moe_repack",
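With this change, the package `__init__.py` only re-exports the Python-level entry points; the `register_fake` shim and the `marlin_gemm_moe` fake registration move into `fused_marlin_moe.py` (shown further below). A minimal usage sketch follows, assuming the built package is importable as `moe` (the directory name in this build tree) and that `fused_topk` keeps its upstream vLLM-style signature — both are assumptions, not confirmed by this diff:

    # Hedged sketch: exercising the API still re-exported after this commit.
    import torch
    from moe import fused_moe, fused_topk, grouped_topk, fused_marlin_moe  # re-exports from the diff

    hidden_states = torch.randn(4, 128, device="cuda", dtype=torch.float16)
    gating_output = torch.randn(4, 8, device="cuda", dtype=torch.float32)

    # Assumed signature (from upstream vLLM): returns per-token expert weights and expert ids.
    topk_weights, topk_ids = fused_topk(hidden_states, gating_output, topk=2, renormalize=True)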
build/{torch25-cxx11-cu118-x86_64-linux/moe/_moe_nskz7v224zllw.abi3.so → torch24-cxx11-cu118-x86_64-linux/moe/_moe_w3lspmuramohg.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2faeea044dbfd59eaf429d039ae368ed0c3e500817ac1acaefb3720ceca1f5ea
 size 84165672
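The `.abi3.so` binaries are tracked with Git LFS, so the diff above only shows the pointer file: the spec version, the SHA-256 oid of the blob, and its size in bytes. A small sketch for checking a locally fetched binary against that pointer; the local path is an assumption about where the file lands after checkout:

    # Hedged sketch: verify a downloaded LFS object against the pointer shown above.
    import hashlib

    expected_oid = "2faeea044dbfd59eaf429d039ae368ed0c3e500817ac1acaefb3720ceca1f5ea"  # from the pointer
    expected_size = 84165672  # bytes, from the pointer

    path = "build/torch24-cxx11-cu118-x86_64-linux/moe/_moe_w3lspmuramohg.abi3.so"
    with open(path, "rb") as f:
        blob = f.read()

    assert len(blob) == expected_size, "size mismatch"
    assert hashlib.sha256(blob).hexdigest() == expected_oid, "sha256 mismatch"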
build/torch24-cxx11-cu118-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_w3lspmuramohg
+ops = torch.ops._moe_w3lspmuramohg
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_w3lspmuramohg::{op_name}"
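`_ops.py` binds the variant-specific extension module (here `_moe_w3lspmuramohg`) to `torch.ops` and exposes `add_op_namespace_prefix` for building fully qualified op names. A self-contained sketch of that dispatch pattern, with the namespace taken from this build variant; the `topk_softmax` call is left as a comment because it needs the compiled extension to be installed:

    # Hedged sketch of the namespace-prefix pattern set up by _ops.py.
    import torch

    namespace = "_moe_w3lspmuramohg"  # variant-specific extension name from this diff

    def add_op_namespace_prefix(op_name: str) -> str:
        # Mirrors the helper in the diff: qualify an op with the extension's namespace.
        return f"{namespace}::{op_name}"

    print(add_op_namespace_prefix("marlin_gemm_moe"))  # _moe_w3lspmuramohg::marlin_gemm_moe

    # After `from . import _moe_w3lspmuramohg`, the kernels resolve through torch.ops, e.g.:
    #   ops = torch.ops._moe_w3lspmuramohg
    #   ops.topk_softmax(topk_weights, topk_ids, token_expert_indicies, gating_output)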
build/torch24-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
@@ -1,13 +1,25 @@
 """Fused MoE utilities for GPTQ."""
 
 import functools
-from typing import Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional
 
 import torch
 
+from ._ops import add_op_namespace_prefix, ops
 from .fused_moe import fused_topk, moe_align_block_size, try_get_optimal_moe_config
-from .scalar_type import scalar_types
-
+from .scalar_type import ScalarType, scalar_types
+
+# neuron has torch version that doesn't even have impl_abstract
+if TYPE_CHECKING:
+
+    def register_fake(fn):
+        return lambda name: fn
+
+else:
+    try:
+        from torch.library import register_fake
+    except ImportError:
+        from torch.library import impl_abstract as register_fake
 
 
 def get_scalar_type(num_bits: int, has_zp: bool):
@@ -116,7 +128,7 @@ def single_marlin_moe(
 
     scalar_type = get_scalar_type(num_bits, has_zero_point)
 
-    intermediate_cache = ops.
+    intermediate_cache = ops.marlin_gemm_moe(
         hidden_states,
         w,
         sorted_token_ids,
@@ -287,7 +299,7 @@ def fused_marlin_moe(
         dtype=hidden_states.dtype,
     )
 
-    intermediate_cache1 = ops.
+    intermediate_cache1 = ops.marlin_gemm_moe(
         hidden_states,
         w1,
         sorted_token_ids,
@@ -312,7 +324,7 @@ def fused_marlin_moe(
 
     ops.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, 2 * N))
 
-    intermediate_cache3 = ops.
+    intermediate_cache3 = ops.marlin_gemm_moe(
         intermediate_cache2,
         w2,
         sorted_token_ids,
@@ -336,3 +348,31 @@ def fused_marlin_moe(
     )
 
     return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape), dim=1)
+
+
+if hasattr(ops, "marlin_gemm_moe"):
+
+    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
+    def marlin_gemm_moe_fake(
+        a: torch.Tensor,
+        b_q_weights: torch.Tensor,
+        sorted_ids: torch.Tensor,
+        topk_weights: torch.Tensor,
+        topk_ids: torch.Tensor,
+        b_scales: torch.Tensor,
+        b_zero_points: torch.Tensor,
+        g_idx: torch.Tensor,
+        perm: torch.Tensor,
+        workspace: torch.Tensor,
+        b_q_type: ScalarType,
+        size_m: torch.SymInt,
+        size_n: torch.SymInt,
+        size_k: torch.SymInt,
+        is_k_full: bool,
+        num_experts: int,
+        topk: int,
+        moe_block_size: int,
+        replicate_input: bool,
+        apply_weights: bool,
+    ) -> torch.Tensor:
+        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)
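The block appended to `fused_marlin_moe.py` registers a fake ("meta") implementation for `marlin_gemm_moe`, so shape inference and `torch.compile` tracing can run without executing the CUDA kernel; on older torch it falls back from `register_fake` to `impl_abstract`. A condensed, self-contained sketch of the same pattern follows — the op namespace and trimmed argument list are illustrative, and the decorator is commented out because it requires the real op to be registered first:

    # Hedged sketch of the fake-kernel registration pattern added in this diff.
    import torch

    try:
        from torch.library import register_fake                     # newer torch
    except ImportError:
        from torch.library import impl_abstract as register_fake    # older torch fallback

    # @register_fake("_moe_example::marlin_gemm_moe")  # needs the compiled op to exist
    def marlin_gemm_moe_fake(a, size_m, size_n, topk):
        # A fake kernel only reports the output's shape/dtype/device; it never computes.
        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)

In the diff the registration is additionally guarded by hasattr(ops, "marlin_gemm_moe"), so build variants without the Marlin kernel simply skip it.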
build/torch24-cxx11-cu118-x86_64-linux/moe/fused_moe.py
CHANGED
@@ -9,9 +9,9 @@ import torch
 import triton
 import triton.language as tl
 
-from .
+from ._ops import ops
 from .fp8 import scaled_fp8_quant
-
+from .platforms import current_platform
 
 VLLM_FUSED_MOE_CHUNK_SIZE = int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "32768"))
 
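`fused_moe.py` still reads its chunk size from the environment at import time, so the Triton path can be tuned without touching code. A small sketch; the `moe` import path is an assumption based on this build tree:

    # Hedged sketch: override VLLM_FUSED_MOE_CHUNK_SIZE before importing the module,
    # since the value is read once at import time (default 32768, per the diff).
    import os

    os.environ["VLLM_FUSED_MOE_CHUNK_SIZE"] = "16384"

    from moe import fused_moe  # assumed import path for this built package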
build/torch24-cxx11-cu121-x86_64-linux/moe/__init__.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py (shown in full above).
build/{torch25-cxx11-cu121-x86_64-linux/moe/_moe_t32bhzwhzero6.abi3.so → torch24-cxx11-cu121-x86_64-linux/moe/_moe_xztwj3vfii47s.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9c5d4bd811ee24dd293d42959e6d23d66dddcc186b2ede701ebcbf6d66705fe1
 size 84364504
build/torch24-cxx11-cu121-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_xztwj3vfii47s
+ops = torch.ops._moe_xztwj3vfii47s
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_xztwj3vfii47s::{op_name}"
build/torch24-cxx11-cu121-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py (shown in full above).
build/torch24-cxx11-cu121-x86_64-linux/moe/fused_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_moe.py (shown in full above).
build/torch24-cxx11-cu124-x86_64-linux/moe/__init__.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py (shown in full above).
build/{torch25-cxx11-cu124-x86_64-linux/moe/_moe_pgljmg5ek5k4e.abi3.so → torch24-cxx11-cu124-x86_64-linux/moe/_moe_zjfwjryvbxcss.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a8e33340a0b05f5776c1e5ef66e371b2c198dc00c03c810e2c4ef20923d7a417
 size 84063160
build/torch24-cxx11-cu124-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_zjfwjryvbxcss
+ops = torch.ops._moe_zjfwjryvbxcss
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_zjfwjryvbxcss::{op_name}"
build/torch24-cxx11-cu124-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py (shown in full above).
build/torch24-cxx11-cu124-x86_64-linux/moe/fused_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_moe.py (shown in full above).
build/torch24-cxx98-cu118-x86_64-linux/moe/__init__.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py (shown in full above).
build/{torch24-cxx11-cu118-x86_64-linux/moe/_moe_wtjc356yopxde.abi3.so → torch24-cxx98-cu118-x86_64-linux/moe/_moe_vjujc4o4hplak.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:0aea1e40159b3d8ca879344b36d6c3229d764baf9553b1bef2a04460f1f03f31
+size 84157888
build/torch24-cxx98-cu118-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_vjujc4o4hplak
+ops = torch.ops._moe_vjujc4o4hplak
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_vjujc4o4hplak::{op_name}"
build/torch24-cxx98-cu118-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py (shown in full above).
build/torch24-cxx98-cu118-x86_64-linux/moe/fused_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_moe.py (shown in full above).
build/torch24-cxx98-cu121-x86_64-linux/moe/__init__.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py (shown in full above).
build/{torch25-cxx98-cu121-x86_64-linux/moe/_moe_plblvprmwqffy.abi3.so → torch24-cxx98-cu121-x86_64-linux/moe/_moe_bjua6v5mj6njy.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:71767ce941c8fb0e823c11cdebb01bfd77f2250df2873b862473803072276bf4
 size 84360960
build/torch24-cxx98-cu121-x86_64-linux/moe/_moe_hrq7opevcb4ug.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0d1b063e4c52f5d744025e000fd79c5f41cdf56a32883c2d269b9c59f586c9e4
-size 84360992
build/torch24-cxx98-cu121-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_bjua6v5mj6njy
+ops = torch.ops._moe_bjua6v5mj6njy
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_bjua6v5mj6njy::{op_name}"
build/torch24-cxx98-cu121-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py (shown in full above).
build/torch24-cxx98-cu121-x86_64-linux/moe/fused_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_moe.py (shown in full above).
build/torch24-cxx98-cu124-x86_64-linux/moe/__init__.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py (shown in full above).
build/{torch25-cxx98-cu124-x86_64-linux/moe/_moe_k6bmwmtgkqymw.abi3.so → torch24-cxx98-cu124-x86_64-linux/moe/_moe_ajhcvhc2njy6q.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:38256704ec3f4ad93da175dff5054670c8e9db26b5573579d80331af6f271373
 size 84059616
build/torch24-cxx98-cu124-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_ajhcvhc2njy6q
+ops = torch.ops._moe_ajhcvhc2njy6q
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_ajhcvhc2njy6q::{op_name}"
build/torch24-cxx98-cu124-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py (shown in full above).
build/torch24-cxx98-cu124-x86_64-linux/moe/fused_moe.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/fused_moe.py (shown in full above).
build/torch25-cxx11-cu118-x86_64-linux/moe/__init__.py
CHANGED
Identical change to build/torch24-cxx11-cu118-x86_64-linux/moe/__init__.py (shown in full above).
build/{torch24-cxx11-cu121-x86_64-linux/moe/_moe_fidhfyl4jgbje.abi3.so → torch25-cxx11-cu118-x86_64-linux/moe/_moe_wbafjrt24mw7y.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:eb03ab835bafe70c299a49cec39abf27f5b5d78715b16eed3527a683181df529
+size 84165672
build/torch25-cxx11-cu118-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_wbafjrt24mw7y
+ops = torch.ops._moe_wbafjrt24mw7y
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_wbafjrt24mw7y::{op_name}"
build/torch25-cxx11-cu118-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
@@ -1,13 +1,25 @@
 """Fused MoE utilities for GPTQ."""
 
 import functools
-from typing import Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional
 
 import torch
 
+from ._ops import add_op_namespace_prefix, ops
 from .fused_moe import fused_topk, moe_align_block_size, try_get_optimal_moe_config
-from .scalar_type import scalar_types
-
+from .scalar_type import ScalarType, scalar_types
+
+# neuron has torch version that doesn't even have impl_abstract
+if TYPE_CHECKING:
+
+    def register_fake(fn):
+        return lambda name: fn
+
+else:
+    try:
+        from torch.library import register_fake
+    except ImportError:
+        from torch.library import impl_abstract as register_fake
 
 
 def get_scalar_type(num_bits: int, has_zp: bool):
@@ -116,7 +128,7 @@ def single_marlin_moe(
 
     scalar_type = get_scalar_type(num_bits, has_zero_point)
 
-    intermediate_cache = ops.
+    intermediate_cache = ops.marlin_gemm_moe(
         hidden_states,
         w,
         sorted_token_ids,
@@ -287,7 +299,7 @@ def fused_marlin_moe(
         dtype=hidden_states.dtype,
     )
 
-    intermediate_cache1 = ops.
+    intermediate_cache1 = ops.marlin_gemm_moe(
         hidden_states,
         w1,
         sorted_token_ids,
@@ -312,7 +324,7 @@ def fused_marlin_moe(
 
     ops.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, 2 * N))
 
-    intermediate_cache3 = ops.
+    intermediate_cache3 = ops.marlin_gemm_moe(
         intermediate_cache2,
         w2,
         sorted_token_ids,
@@ -336,3 +348,31 @@ def fused_marlin_moe(
     )
 
     return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape), dim=1)
+
+
+if hasattr(ops, "marlin_gemm_moe"):
+
+    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
+    def marlin_gemm_moe_fake(
+        a: torch.Tensor,
+        b_q_weights: torch.Tensor,
+        sorted_ids: torch.Tensor,
+        topk_weights: torch.Tensor,
+        topk_ids: torch.Tensor,
+        b_scales: torch.Tensor,
+        b_zero_points: torch.Tensor,
+        g_idx: torch.Tensor,
+        perm: torch.Tensor,
+        workspace: torch.Tensor,
+        b_q_type: ScalarType,
+        size_m: torch.SymInt,
+        size_n: torch.SymInt,
+        size_k: torch.SymInt,
+        is_k_full: bool,
+        num_experts: int,
+        topk: int,
+        moe_block_size: int,
+        replicate_input: bool,
+        apply_weights: bool,
+    ) -> torch.Tensor:
+        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)

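The `register_fake` fallback and `marlin_gemm_moe_fake` moved into `fused_marlin_moe.py` above give the custom op a fake (meta) implementation, so shape inference and `torch.compile` tracing work without running the real kernel. A runnable sketch of the same mechanism with a hypothetical `demo_ops::row_gemm` op (names are illustrative, not part of this package):

import torch

try:
    from torch.library import register_fake  # torch >= 2.4
except ImportError:
    from torch.library import impl_abstract as register_fake  # older torch

torch.library.define("demo_ops::row_gemm", "(Tensor a, Tensor b) -> Tensor")

@torch.library.impl("demo_ops::row_gemm", "cpu")
def row_gemm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    return a @ b  # the "real" kernel

@register_fake("demo_ops::row_gemm")
def row_gemm_fake(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Like marlin_gemm_moe_fake: only the output's shape/dtype/device matter here.
    return torch.empty((a.shape[0], b.shape[1]), dtype=a.dtype, device=a.device)

# On the meta device only the fake implementation runs, so shapes propagate
# without touching real data.
out = torch.ops.demo_ops.row_gemm(
    torch.empty(4, 8, device="meta"), torch.empty(8, 16, device="meta")
)
print(out.shape)  # torch.Size([4, 16])
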
build/torch25-cxx11-cu118-x86_64-linux/moe/fused_moe.py
CHANGED
@@ -9,9 +9,9 @@ import torch
 import triton
 import triton.language as tl
 
-from .
+from ._ops import ops
 from .fp8 import scaled_fp8_quant
-
+from .platforms import current_platform
 
 VLLM_FUSED_MOE_CHUNK_SIZE = int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "32768"))
 

build/torch25-cxx11-cu121-x86_64-linux/moe/__init__.py
CHANGED
@@ -1,19 +1,5 @@
-from typing import TYPE_CHECKING
-
 import torch
 
-# neuron has torch version that doesn't even have impl_abstract
-if TYPE_CHECKING:
-
-    def register_fake(fn):
-        return lambda name: fn
-
-else:
-    try:
-        from torch.library import register_fake
-    except ImportError:
-        from torch.library import impl_abstract as register_fake
-
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
 from .fused_moe import fused_moe, fused_topk, grouped_topk
@@ -91,39 +77,6 @@ def topk_softmax(
     ops.topk_softmax(topk_weights, topk_ids, token_expert_indicies, gating_output)
 
 
-if hasattr(ops, "marlin_gemm_moe"):
-
-    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
-    def marlin_gemm_moe_fake(
-        a: torch.Tensor,
-        b_q_weights: torch.Tensor,
-        sorted_ids: torch.Tensor,
-        topk_weights: torch.Tensor,
-        topk_ids: torch.Tensor,
-        b_scales: torch.Tensor,
-        b_zero_points: torch.Tensor,
-        g_idx: torch.Tensor,
-        perm: torch.Tensor,
-        workspace: torch.Tensor,
-        b_q_type: ScalarType,
-        size_m: torch.SymInt,
-        size_n: torch.SymInt,
-        size_k: torch.SymInt,
-        is_k_full: bool,
-        num_experts: int,
-        topk: int,
-        moe_block_size: int,
-        replicate_input: bool,
-        apply_weights: bool,
-    ) -> torch.Tensor:
-        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",

build/{torch24-cxx98-cu118-x86_64-linux/moe/_moe_v3wdnwni3a5ce.abi3.so → torch25-cxx11-cu121-x86_64-linux/moe/_moe_ezuwtpw27xv6u.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:378a8a453186ae62a92342077a988271cd7a02f46fbe303b4505d4484f1bfaef
+size 84364536

build/torch25-cxx11-cu121-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_ezuwtpw27xv6u
+ops = torch.ops._moe_ezuwtpw27xv6u
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_ezuwtpw27xv6u::{op_name}"

build/torch25-cxx11-cu121-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
@@ -1,13 +1,25 @@
 """Fused MoE utilities for GPTQ."""
 
 import functools
-from typing import Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional
 
 import torch
 
+from ._ops import add_op_namespace_prefix, ops
 from .fused_moe import fused_topk, moe_align_block_size, try_get_optimal_moe_config
-from .scalar_type import scalar_types
-
+from .scalar_type import ScalarType, scalar_types
+
+# neuron has torch version that doesn't even have impl_abstract
+if TYPE_CHECKING:
+
+    def register_fake(fn):
+        return lambda name: fn
+
+else:
+    try:
+        from torch.library import register_fake
+    except ImportError:
+        from torch.library import impl_abstract as register_fake
 
 
 def get_scalar_type(num_bits: int, has_zp: bool):
@@ -116,7 +128,7 @@ def single_marlin_moe(
 
     scalar_type = get_scalar_type(num_bits, has_zero_point)
 
-    intermediate_cache = ops.
+    intermediate_cache = ops.marlin_gemm_moe(
         hidden_states,
         w,
         sorted_token_ids,
@@ -287,7 +299,7 @@ def fused_marlin_moe(
         dtype=hidden_states.dtype,
     )
 
-    intermediate_cache1 = ops.
+    intermediate_cache1 = ops.marlin_gemm_moe(
         hidden_states,
         w1,
         sorted_token_ids,
@@ -312,7 +324,7 @@ def fused_marlin_moe(
 
     ops.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, 2 * N))
 
-    intermediate_cache3 = ops.
+    intermediate_cache3 = ops.marlin_gemm_moe(
         intermediate_cache2,
         w2,
         sorted_token_ids,
@@ -336,3 +348,31 @@ def fused_marlin_moe(
     )
 
     return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape), dim=1)
+
+
+if hasattr(ops, "marlin_gemm_moe"):
+
+    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
+    def marlin_gemm_moe_fake(
+        a: torch.Tensor,
+        b_q_weights: torch.Tensor,
+        sorted_ids: torch.Tensor,
+        topk_weights: torch.Tensor,
+        topk_ids: torch.Tensor,
+        b_scales: torch.Tensor,
+        b_zero_points: torch.Tensor,
+        g_idx: torch.Tensor,
+        perm: torch.Tensor,
+        workspace: torch.Tensor,
+        b_q_type: ScalarType,
+        size_m: torch.SymInt,
+        size_n: torch.SymInt,
+        size_k: torch.SymInt,
+        is_k_full: bool,
+        num_experts: int,
+        topk: int,
+        moe_block_size: int,
+        replicate_input: bool,
+        apply_weights: bool,
+    ) -> torch.Tensor:
+        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)

build/torch25-cxx11-cu121-x86_64-linux/moe/fused_moe.py
CHANGED
@@ -9,9 +9,9 @@ import torch
 import triton
 import triton.language as tl
 
-from .
+from ._ops import ops
 from .fp8 import scaled_fp8_quant
-
+from .platforms import current_platform
 
 VLLM_FUSED_MOE_CHUNK_SIZE = int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "32768"))
 

build/torch25-cxx11-cu124-x86_64-linux/moe/__init__.py
CHANGED
@@ -1,19 +1,5 @@
-from typing import TYPE_CHECKING
-
 import torch
 
-# neuron has torch version that doesn't even have impl_abstract
-if TYPE_CHECKING:
-
-    def register_fake(fn):
-        return lambda name: fn
-
-else:
-    try:
-        from torch.library import register_fake
-    except ImportError:
-        from torch.library import impl_abstract as register_fake
-
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
 from .fused_moe import fused_moe, fused_topk, grouped_topk
@@ -91,39 +77,6 @@ def topk_softmax(
     ops.topk_softmax(topk_weights, topk_ids, token_expert_indicies, gating_output)
 
 
-if hasattr(ops, "marlin_gemm_moe"):
-
-    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
-    def marlin_gemm_moe_fake(
-        a: torch.Tensor,
-        b_q_weights: torch.Tensor,
-        sorted_ids: torch.Tensor,
-        topk_weights: torch.Tensor,
-        topk_ids: torch.Tensor,
-        b_scales: torch.Tensor,
-        b_zero_points: torch.Tensor,
-        g_idx: torch.Tensor,
-        perm: torch.Tensor,
-        workspace: torch.Tensor,
-        b_q_type: ScalarType,
-        size_m: torch.SymInt,
-        size_n: torch.SymInt,
-        size_k: torch.SymInt,
-        is_k_full: bool,
-        num_experts: int,
-        topk: int,
-        moe_block_size: int,
-        replicate_input: bool,
-        apply_weights: bool,
-    ) -> torch.Tensor:
-        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",

build/{torch24-cxx11-cu124-x86_64-linux/moe/_moe_sg5gu4g3brle6.abi3.so → torch25-cxx11-cu124-x86_64-linux/moe/_moe_b3lelvb3xhtk2.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3ae1204c5e2f4c7692676e0ef703dbab4f20a9f14652c75dee41b8d56560db19
 size 84063128

build/torch25-cxx11-cu124-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_b3lelvb3xhtk2
+ops = torch.ops._moe_b3lelvb3xhtk2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_b3lelvb3xhtk2::{op_name}"

build/torch25-cxx11-cu124-x86_64-linux/moe/fused_marlin_moe.py
CHANGED
@@ -1,13 +1,25 @@
 """Fused MoE utilities for GPTQ."""
 
 import functools
-from typing import Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional
 
 import torch
 
+from ._ops import add_op_namespace_prefix, ops
 from .fused_moe import fused_topk, moe_align_block_size, try_get_optimal_moe_config
-from .scalar_type import scalar_types
-
+from .scalar_type import ScalarType, scalar_types
+
+# neuron has torch version that doesn't even have impl_abstract
+if TYPE_CHECKING:
+
+    def register_fake(fn):
+        return lambda name: fn
+
+else:
+    try:
+        from torch.library import register_fake
+    except ImportError:
+        from torch.library import impl_abstract as register_fake
 
 
 def get_scalar_type(num_bits: int, has_zp: bool):
@@ -116,7 +128,7 @@ def single_marlin_moe(
 
     scalar_type = get_scalar_type(num_bits, has_zero_point)
 
-    intermediate_cache = ops.
+    intermediate_cache = ops.marlin_gemm_moe(
         hidden_states,
         w,
         sorted_token_ids,
@@ -287,7 +299,7 @@ def fused_marlin_moe(
         dtype=hidden_states.dtype,
     )
 
-    intermediate_cache1 = ops.
+    intermediate_cache1 = ops.marlin_gemm_moe(
         hidden_states,
         w1,
         sorted_token_ids,
@@ -312,7 +324,7 @@ def fused_marlin_moe(
 
     ops.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, 2 * N))
 
-    intermediate_cache3 = ops.
+    intermediate_cache3 = ops.marlin_gemm_moe(
         intermediate_cache2,
         w2,
         sorted_token_ids,
@@ -336,3 +348,31 @@ def fused_marlin_moe(
     )
 
     return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape), dim=1)
+
+
+if hasattr(ops, "marlin_gemm_moe"):
+
+    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
+    def marlin_gemm_moe_fake(
+        a: torch.Tensor,
+        b_q_weights: torch.Tensor,
+        sorted_ids: torch.Tensor,
+        topk_weights: torch.Tensor,
+        topk_ids: torch.Tensor,
+        b_scales: torch.Tensor,
+        b_zero_points: torch.Tensor,
+        g_idx: torch.Tensor,
+        perm: torch.Tensor,
+        workspace: torch.Tensor,
+        b_q_type: ScalarType,
+        size_m: torch.SymInt,
+        size_n: torch.SymInt,
+        size_k: torch.SymInt,
+        is_k_full: bool,
+        num_experts: int,
+        topk: int,
+        moe_block_size: int,
+        replicate_input: bool,
+        apply_weights: bool,
+    ) -> torch.Tensor:
+        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)

build/torch25-cxx11-cu124-x86_64-linux/moe/fused_moe.py
CHANGED
@@ -9,9 +9,9 @@ import torch
 import triton
 import triton.language as tl
 
-from .
+from ._ops import ops
 from .fp8 import scaled_fp8_quant
-
+from .platforms import current_platform
 
 VLLM_FUSED_MOE_CHUNK_SIZE = int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "32768"))
 

build/torch25-cxx98-cu118-x86_64-linux/moe/__init__.py
CHANGED
@@ -1,19 +1,5 @@
-from typing import TYPE_CHECKING
-
 import torch
 
-# neuron has torch version that doesn't even have impl_abstract
-if TYPE_CHECKING:
-
-    def register_fake(fn):
-        return lambda name: fn
-
-else:
-    try:
-        from torch.library import register_fake
-    except ImportError:
-        from torch.library import impl_abstract as register_fake
-
 from ._ops import add_op_namespace_prefix, ops
 from .fused_marlin_moe import fused_marlin_moe
 from .fused_moe import fused_moe, fused_topk, grouped_topk
@@ -91,39 +77,6 @@ def topk_softmax(
     ops.topk_softmax(topk_weights, topk_ids, token_expert_indicies, gating_output)
 
 
-if hasattr(ops, "marlin_gemm_moe"):
-
-    @register_fake(add_op_namespace_prefix("marlin_gemm_moe"))
-    def marlin_gemm_moe_fake(
-        a: torch.Tensor,
-        b_q_weights: torch.Tensor,
-        sorted_ids: torch.Tensor,
-        topk_weights: torch.Tensor,
-        topk_ids: torch.Tensor,
-        b_scales: torch.Tensor,
-        b_zero_points: torch.Tensor,
-        g_idx: torch.Tensor,
-        perm: torch.Tensor,
-        workspace: torch.Tensor,
-        b_q_type: ScalarType,
-        size_m: torch.SymInt,
-        size_n: torch.SymInt,
-        size_k: torch.SymInt,
-        is_k_full: bool,
-        num_experts: int,
-        topk: int,
-        moe_block_size: int,
-        replicate_input: bool,
-        apply_weights: bool,
-    ) -> torch.Tensor:
-        return torch.empty((size_m, topk, size_n), dtype=a.dtype, device=a.device)
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
 __all__ = [
     "gptq_marlin_moe_repack",
     "awq_marlin_moe_repack",

build/torch25-cxx98-cu118-x86_64-linux/moe/_moe_dtibz76vuxaaq.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b1eef7e6a15aca930caa813a845147beeec16159c8cce89891c40d080a6f3062
-size 84157880

build/torch25-cxx98-cu118-x86_64-linux/moe/_moe_mqt4gjnisx6je.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b8ebfaa74892fb13f34924a63e188b9593cc3290831bf31e0f78ae99c9526b0
+size 84157856

build/torch25-cxx98-cu118-x86_64-linux/moe/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _moe_mqt4gjnisx6je
+ops = torch.ops._moe_mqt4gjnisx6je
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_moe_mqt4gjnisx6je::{op_name}"
