Add extension versioning
- build.toml +3 -0
- build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx11-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch24-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx11-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx11-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch24-cxx11-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx11-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx11-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch24-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx98-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx98-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch24-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx98-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx98-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch24-cxx98-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx98-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx98-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch24-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx11-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx11-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx11-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx98-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx98-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx98-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +0 -0
- build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -0
- ext-torch/__init__.py +17 -8
build.toml CHANGED
@@ -1,3 +1,6 @@
+[general]
+version = "0.0.1"
+
 [torch]
 name = "activation"
 src = [
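The new [general] table gives the kernel a version, and the .so renames below show where that version ends up: the compiled extension module gains a version suffix (_activation.abi3.so → _activation_0_0_1.abi3.so). A minimal sketch of the naming convention implied by those renames follows; the helper name and the dot-to-underscore rule are inferred here, not taken from kernel-builder itself.

# Hypothetical helper illustrating the versioned module naming implied by this
# commit; kernel-builder's actual implementation may differ.
def versioned_module_name(name: str, version: str) -> str:
    # Dots are not valid in Python module names, so they become underscores.
    return f"_{name}_{version.replace('.', '_')}"

assert versioned_module_name("activation", "0.0.1") == "_activation_0_0_1"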
build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py CHANGED
@@ -1,32 +1,41 @@
 import torch
 
-
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
-
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
-
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
-
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
-
+    ops.gelu_quick(out, x)
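The wrappers above are in-place: each one forwards to the registered op, writes into a caller-provided out tensor, and returns None. A hedged usage sketch, assuming the CUDA build is importable as activation and the vLLM-style layout where x packs two halves along its last dimension and out has half that width:

# Usage sketch (assumptions: package importable as `activation`,
# vLLM-style shapes: x is [..., 2*d], out is [..., d]).
import torch
import activation

x = torch.randn(8, 2 * 128, device="cuda", dtype=torch.float16)
out = torch.empty(8, 128, device="cuda", dtype=torch.float16)
activation.silu_and_mul(out, x)  # writes silu(x[..., :128]) * x[..., 128:] into out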
build/torch24-cxx11-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch24-cxx11-cu118-x86_64-linux/activation/_activation.abi3.so and b/build/torch24-cxx11-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch24-cxx11-cu118-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
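_ops.py only needs the import because loading the versioned extension registers its custom ops under a torch.ops namespace of the same name, which the last line then aliases as ops. A rough Python-only sketch of that mechanism, using torch.library with a stand-in namespace and CPU implementation instead of the compiled .so:

# Stand-in demo of torch.ops namespace registration; "_demo_0_0_1" and the
# CPU implementation are illustrative, not part of this repository.
import torch

lib = torch.library.Library("_demo_0_0_1", "DEF")  # keep a reference so the definitions persist
lib.define("silu_and_mul(Tensor(a!) out, Tensor x) -> ()")

def _silu_and_mul_cpu(out: torch.Tensor, x: torch.Tensor) -> None:
    d = x.shape[-1] // 2
    out.copy_(torch.nn.functional.silu(x[..., :d]) * x[..., d:])

lib.impl("silu_and_mul", _silu_and_mul_cpu, "CPU")

x = torch.randn(4, 8)
out = torch.empty(4, 4)
torch.ops._demo_0_0_1.silu_and_mul(out, x)  # dispatches to the registered CPU impl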
build/torch24-cxx11-cu121-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch24-cxx11-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch24-cxx11-cu121-x86_64-linux/activation/_activation.abi3.so and b/build/torch24-cxx11-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch24-cxx11-cu121-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx11-cu124-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch24-cxx11-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch24-cxx11-cu124-x86_64-linux/activation/_activation.abi3.so and b/build/torch24-cxx11-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch24-cxx11-cu124-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx98-cu118-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch24-cxx98-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch24-cxx98-cu118-x86_64-linux/activation/_activation.abi3.so and b/build/torch24-cxx98-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch24-cxx98-cu118-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx98-cu121-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch24-cxx98-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch24-cxx98-cu121-x86_64-linux/activation/_activation.abi3.so and b/build/torch24-cxx98-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch24-cxx98-cu121-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx98-cu124-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch24-cxx98-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch24-cxx98-cu124-x86_64-linux/activation/_activation.abi3.so and b/build/torch24-cxx98-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch24-cxx98-cu124-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch25-cxx11-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation.abi3.so and b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch25-cxx11-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation.abi3.so and b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch25-cxx11-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation.abi3.so and b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch25-cxx98-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation.abi3.so and b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch25-cxx98-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation.abi3.so and b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.
build/torch25-cxx98-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} RENAMED
Binary files a/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation.abi3.so and b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so differ
build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
ext-torch/__init__.py CHANGED
Same diff as build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py above.