Add extension versioning
- build.toml +3 -0
- build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx11-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +2 -2
- build/torch24-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx11-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx11-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +2 -2
- build/torch24-cxx11-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx11-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx11-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +2 -2
- build/torch24-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx98-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx98-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so} +2 -2
- build/torch24-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx98-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx98-cu121-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch24-cxx98-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch24-cxx98-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch24-cxx98-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch24-cxx98-cu124-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch24-cxx98-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch24-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx11-cu118-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx11-cu121-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx11-cu124-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx98-cu118-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx98-cu121-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py +3 -0
- build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py +17 -8
- build/torch25-cxx98-cu124-x86_64-linux/activation/_activation.abi3.so +0 -3
- build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so +3 -0
- build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -0
- ext-torch/__init__.py +17 -8
build.toml
CHANGED
@@ -1,3 +1,6 @@
+[general]
+version = "0.0.1"
+
 [torch]
 name = "activation"
 src = [
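The new [general] version field is what drives the renamed binaries and the new _ops.py shims below: the compiled extension and its torch.ops namespace pick up a suffix derived from the version string, so "activation" at version "0.0.1" becomes "_activation_0_0_1". A minimal sketch of that mapping, assuming a simple dot-to-underscore substitution (the real kernel-builder logic may compute it differently):

# Hedged sketch: derive the suffixed module name used below
# ("_activation_0_0_1") from the build.toml name and version.
def versioned_module_name(name: str, version: str) -> str:
    return f"_{name}_{version.replace('.', '_')}"

assert versioned_module_name("activation", "0.0.1") == "_activation_0_0_1"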
build/torch24-cxx11-cu118-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch24-cxx11-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:75cc4d6e78b8c6de3cd5dca2e8c0e66deebe84756cd5e46697f1194d4b7824e8
+size 775080
build/torch24-cxx11-cu118-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
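For reference, the updated wrappers keep the same in-place calling convention as before: the caller preallocates the output tensor and the op writes into it. A hedged usage sketch (not part of this commit; the shapes, dtype, and CUDA device are illustrative assumptions, and silu_and_mul is assumed to follow the usual gated-activation layout where the input's last dimension holds two halves):

import torch
import activation  # the package built from this repo

# x holds [gate, up] halves along the last dimension, so the preallocated
# output has half of x's width (illustrative shapes only).
x = torch.randn(8, 2 * 128, device="cuda", dtype=torch.float16)
out = torch.empty(8, 128, device="cuda", dtype=torch.float16)
activation.silu_and_mul(out, x)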
build/torch24-cxx11-cu121-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch24-cxx11-cu121-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:54cd203d14bd0aecb50741dce4b56b9c4d828d357956d5a019fcc8af082eabe4
+size 771216
build/torch24-cxx11-cu121-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx11-cu124-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch24-cxx11-cu124-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5d9be0c307589972018be48e8d3bf658f07ce6e51482a3b6ed89310af86250ed
+size 808000
build/torch24-cxx11-cu124-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx98-cu118-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch24-cxx98-cu118-x86_64-linux/activation/{_activation.abi3.so → _activation_0_0_1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:23cb6373e29f7bb4171bae06dd294876681c7f7bb07315aa0a1d2b8b4583cda3
+size 761480
build/torch24-cxx98-cu118-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx98-cu121-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch24-cxx98-cu121-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3df5b3748a14214c1c86100f14f14e6125efada9079955dc0d65d7575a6debb6
-size 757400
build/torch24-cxx98-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad1a5445296703479f3201e099259cc4aa4459f8ae5117f8235dc1636582dcc2
+size 757424
build/torch24-cxx98-cu121-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch24-cxx98-cu124-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch24-cxx98-cu124-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d3c9e874d518d522c6fb3106515376eaa7151bdbba9b029f3c70448a65d6895d
-size 798360
build/torch24-cxx98-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09e025b6b8a301e237947cb7c35be59511be31d3bdbfa63be0d7480490551d7b
+size 798384
build/torch24-cxx98-cu124-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch25-cxx11-cu118-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8e7b40d071f2fdb422d1caee712e5d0328c3cd85dc17695923fc11885a397f75
-size 775048
build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2004d875a8fcaa64d3324e427b908abc8d339cffc38b9fabdb5666dac435fbfe
+size 775080
build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch25-cxx11-cu121-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:865e5b0255dbc6566459444b9f2325f9f54e568213476a66992314ebb1a47e1d
-size 771184
build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd24afaeecddebb5cfb6e71ba7e30d5ec3588e74928917935b36ef4be0af37c6
+size 771216
build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch25-cxx11-cu124-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3d0524c8b70ea4e427ca499312f076e834d06fe11bea9dd4decb2406a3094c8a
-size 807968
build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:873f027d0761a39b5a34b27718e4a2b04db1fe724eebc1f6469c6a2cdc36ea16
+size 808000
build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch25-cxx98-cu118-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f1a3146807c8c7c97a5222dae8fe0aa186ee01d5bc466bf3b74b49e23ecd156a
-size 761456
build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf2c0968222ddc059adf3c1726e9aeb9cba4f3c42cbf2b814d5ddbe36956c232
+size 761480
build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch25-cxx98-cu121-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5536093e89b5707856d62f677da924c5e3c51e0a2868d3e7e11475508c97d5cf
-size 757400
build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff5b67d79c949546bfc2ced1abf3eaaeec56a707929185ca41baa6d68054d5b1
+size 757424
build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
build/torch25-cxx98-cu124-x86_64-linux/activation/_activation.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:aa6928c9ae395303ab136e1d9761316a77da82f0a0b000996b52d9667f0b1f84
-size 798360
build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_0_0_1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f371ce792c304716c75c523e1664c428a70e3af92bd3ca2053f5052139bf199
+size 798384
build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py
ADDED
@@ -0,0 +1,3 @@
+import torch
+from . import _activation_0_0_1
+ops = torch.ops._activation_0_0_1
ext-torch/__init__.py
CHANGED
@@ -1,32 +1,41 @@
 import torch
 
+try:
+    from ._ops import ops
+except ImportError as e:
+    # Fallback for local development.
+    try:
+        import _activation
+        ops = torch.ops._activation
+    except ImportError:
+        raise e
+
 
 def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.silu_and_mul(out, x)
 
 
 def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_and_mul(out, x)
 
 
 def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_tanh_and_mul(out, x)
 
 
 def fatrelu_and_mul(out: torch.Tensor,
                     x: torch.Tensor,
                     threshold: float = 0.0) -> None:
+    ops.fatrelu_and_mul(out, x, threshold)
 
 
 def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_fast(out, x)
 
 
 def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_new(out, x)
 
 
 def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+    ops.gelu_quick(out, x)
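The try/except at the top of ext-torch/__init__.py (the source template for all the build variants above) lets the same wrapper module serve two situations: installed builds load the versioned ops through ._ops, while a local development build that only produces an unversioned _activation extension falls back to torch.ops._activation. A hedged sketch of checking which path was taken at runtime (illustrative only, not part of this commit):

import activation

try:
    from activation import _ops
    print("versioned build:", _ops.ops)  # e.g. torch.ops._activation_0_0_1
except ImportError:
    print("local development build: using torch.ops._activation")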