danieldk (HF Staff) committed
Commit efe3e5b · 1 Parent(s): fff932d

Build (aarch64)

build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py ADDED
@@ -0,0 +1,52 @@
+ import torch
+
+ from ._ops import ops
+
+ from . import layers
+
+
+ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.silu_and_mul(out, x)
+     return out
+
+
+ def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_and_mul(out, x)
+     return out
+
+
+ def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_tanh_and_mul(out, x)
+     return out
+
+
+ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
+     ops.fatrelu_and_mul(out, x, threshold)
+     return out
+
+
+ def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_fast(out, x)
+     return out
+
+
+ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_new(out, x)
+     return out
+
+
+ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_quick(out, x)
+     return out
+
+
+ __all__ = [
+     "silu_and_mul",
+     "gelu_and_mul",
+     "gelu_tanh_and_mul",
+     "fatrelu_and_mul",
+     "gelu_fast",
+     "gelu_new",
+     "gelu_quick",
+     "layers",
+ ]
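
Usage sketch for the functional wrappers above (assuming the built package is importable as `activation` and a CUDA device is available; the import name is hypothetical). The `out` buffer for the fused `*_and_mul` ops must be preallocated with half of `x`'s final dimension, mirroring the layers module below:

    import torch
    import activation  # hypothetical import name for this build

    x = torch.randn(4, 2048, device="cuda", dtype=torch.float16)  # (..., 2 * d)
    d = x.shape[-1] // 2
    out = torch.empty(x.shape[:-1] + (d,), device=x.device, dtype=x.dtype)
    activation.silu_and_mul(out, x)  # kernel fills `out` in place; the wrapper also returns it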
build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_c444f33.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:895ef59c00498ac74a5b3f1ce40d25bc5d44a0b5440538a6331102cad55d3d3e
+ size 2500840
build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _activation_c444f33
+ ops = torch.ops._activation_c444f33
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_activation_c444f33::{op_name}"
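
One plausible use of `add_op_namespace_prefix` (a hedged sketch; it assumes the ops are registered with out-mutating schemas that return nothing, which matches how the Python wrappers call them, and the `activation` import name is hypothetical): building fully qualified op names for `torch.library` helpers such as `register_fake` so the ops can be traced by torch.compile:

    import torch
    from activation._ops import add_op_namespace_prefix  # hypothetical package name

    # Register a meta/"fake" implementation for tracing; the real kernel
    # mutates `out` in place and returns nothing, so the fake returns None.
    @torch.library.register_fake(add_op_namespace_prefix("silu_and_mul"))
    def _silu_and_mul_fake(out, x):
        return None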
build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py ADDED
@@ -0,0 +1,65 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class SiluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.silu_and_mul(out, x)
+         return out
+
+
+ class GeluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_and_mul(out, x)
+         return out
+
+
+ class GeluTanhAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_tanh_and_mul(out, x)
+         return out
+
+
+ class FatreluAndMul(nn.Module):
+     def __init__(self, threshold: float = 0.0):
+         super().__init__()
+         self.threshold = threshold
+
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.fatrelu_and_mul(out, x, self.threshold)
+         return out
+
+
+ class FastGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_fast(out, x)
+         return out
+
+
+ class NewGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_new(out, x)
+         return out
+
+
+ class QuickGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_quick(out, x)
+         return out
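
How the layers above are typically used, with a pure-PyTorch reference for the gating semantics (a hedged sketch; it assumes `silu_and_mul` computes `silu(x[..., :d]) * x[..., d:]`, the usual SwiGLU-style split, and the `activation` import name is hypothetical — the kernel defines the authoritative behavior):

    import torch
    import torch.nn.functional as F
    from activation.layers import SiluAndMul  # hypothetical package name

    x = torch.randn(8, 4096, device="cuda", dtype=torch.float16)
    out = SiluAndMul()(x)                     # shape (8, 2048)

    d = x.shape[-1] // 2
    ref = F.silu(x[..., :d]) * x[..., d:]     # reference under the assumed split order
    torch.testing.assert_close(out, ref, rtol=1e-2, atol=1e-2)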
build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py ADDED
@@ -0,0 +1,52 @@
+ import torch
+
+ from ._ops import ops
+
+ from . import layers
+
+
+ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.silu_and_mul(out, x)
+     return out
+
+
+ def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_and_mul(out, x)
+     return out
+
+
+ def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_tanh_and_mul(out, x)
+     return out
+
+
+ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
+     ops.fatrelu_and_mul(out, x, threshold)
+     return out
+
+
+ def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_fast(out, x)
+     return out
+
+
+ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_new(out, x)
+     return out
+
+
+ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_quick(out, x)
+     return out
+
+
+ __all__ = [
+     "silu_and_mul",
+     "gelu_and_mul",
+     "gelu_tanh_and_mul",
+     "fatrelu_and_mul",
+     "gelu_fast",
+     "gelu_new",
+     "gelu_quick",
+     "layers",
+ ]
build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_c444f33.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b83bc746b0755f39afa8a7ea023efecc1f7f3aec6d5b3952d37d58ec01d3c1c
+ size 2497024
build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _activation_c444f33
+ ops = torch.ops._activation_c444f33
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_activation_c444f33::{op_name}"
build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py ADDED
@@ -0,0 +1,65 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class SiluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.silu_and_mul(out, x)
+         return out
+
+
+ class GeluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_and_mul(out, x)
+         return out
+
+
+ class GeluTanhAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_tanh_and_mul(out, x)
+         return out
+
+
+ class FatreluAndMul(nn.Module):
+     def __init__(self, threshold: float = 0.0):
+         super().__init__()
+         self.threshold = threshold
+
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.fatrelu_and_mul(out, x, self.threshold)
+         return out
+
+
+ class FastGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_fast(out, x)
+         return out
+
+
+ class NewGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_new(out, x)
+         return out
+
+
+ class QuickGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_quick(out, x)
+         return out
build/torch27-cxx11-cu126-aarch64-linux/activation/__init__.py ADDED
@@ -0,0 +1,52 @@
+ import torch
+
+ from ._ops import ops
+
+ from . import layers
+
+
+ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.silu_and_mul(out, x)
+     return out
+
+
+ def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_and_mul(out, x)
+     return out
+
+
+ def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_tanh_and_mul(out, x)
+     return out
+
+
+ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
+     ops.fatrelu_and_mul(out, x, threshold)
+     return out
+
+
+ def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_fast(out, x)
+     return out
+
+
+ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_new(out, x)
+     return out
+
+
+ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_quick(out, x)
+     return out
+
+
+ __all__ = [
+     "silu_and_mul",
+     "gelu_and_mul",
+     "gelu_tanh_and_mul",
+     "fatrelu_and_mul",
+     "gelu_fast",
+     "gelu_new",
+     "gelu_quick",
+     "layers",
+ ]
build/torch27-cxx11-cu126-aarch64-linux/activation/_activation_c444f33.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83545d0f7507ea6c78d8f28fd3c517ad0d848d813978829eb2eddba997843fe4
+ size 2501064
build/torch27-cxx11-cu126-aarch64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _activation_c444f33
+ ops = torch.ops._activation_c444f33
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_activation_c444f33::{op_name}"
build/torch27-cxx11-cu126-aarch64-linux/activation/layers.py ADDED
@@ -0,0 +1,65 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class SiluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.silu_and_mul(out, x)
+         return out
+
+
+ class GeluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_and_mul(out, x)
+         return out
+
+
+ class GeluTanhAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_tanh_and_mul(out, x)
+         return out
+
+
+ class FatreluAndMul(nn.Module):
+     def __init__(self, threshold: float = 0.0):
+         super().__init__()
+         self.threshold = threshold
+
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.fatrelu_and_mul(out, x, self.threshold)
+         return out
+
+
+ class FastGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_fast(out, x)
+         return out
+
+
+ class NewGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_new(out, x)
+         return out
+
+
+ class QuickGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_quick(out, x)
+         return out
build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py ADDED
@@ -0,0 +1,52 @@
+ import torch
+
+ from ._ops import ops
+
+ from . import layers
+
+
+ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.silu_and_mul(out, x)
+     return out
+
+
+ def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_and_mul(out, x)
+     return out
+
+
+ def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_tanh_and_mul(out, x)
+     return out
+
+
+ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
+     ops.fatrelu_and_mul(out, x, threshold)
+     return out
+
+
+ def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_fast(out, x)
+     return out
+
+
+ def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_new(out, x)
+     return out
+
+
+ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
+     ops.gelu_quick(out, x)
+     return out
+
+
+ __all__ = [
+     "silu_and_mul",
+     "gelu_and_mul",
+     "gelu_tanh_and_mul",
+     "fatrelu_and_mul",
+     "gelu_fast",
+     "gelu_new",
+     "gelu_quick",
+     "layers",
+ ]
build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_c444f33.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38569b82c436152f9adb853050c2aa2b8e5b990023d05e9a944f2a7d3053a641
+ size 2501160
build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _activation_c444f33
+ ops = torch.ops._activation_c444f33
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_activation_c444f33::{op_name}"
build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py ADDED
@@ -0,0 +1,65 @@
+ import torch
+ import torch.nn as nn
+
+ from ._ops import ops
+
+
+ class SiluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.silu_and_mul(out, x)
+         return out
+
+
+ class GeluAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_and_mul(out, x)
+         return out
+
+
+ class GeluTanhAndMul(nn.Module):
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.gelu_tanh_and_mul(out, x)
+         return out
+
+
+ class FatreluAndMul(nn.Module):
+     def __init__(self, threshold: float = 0.0):
+         super().__init__()
+         self.threshold = threshold
+
+     def forward(self, x: torch.Tensor):
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         ops.fatrelu_and_mul(out, x, self.threshold)
+         return out
+
+
+ class FastGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_fast(out, x)
+         return out
+
+
+ class NewGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_new(out, x)
+         return out
+
+
+ class QuickGELU(nn.Module):
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         out = torch.empty_like(x)
+         ops.gelu_quick(out, x)
+         return out