from typing import (
    Any,
    Iterable,
    NamedTuple,
    Optional,
    overload,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)

from typing_extensions import Self

from torch import Tensor

from torch._prims_common import DeviceLikeType
from torch.types import _dtype

class PackedSequence_(NamedTuple):
    """Raw field layout backing ``PackedSequence``: the packed data tensor,
    the batch-size tensor, and the optional sort/unsort index tensors."""

    data: Tensor
    batch_sizes: Tensor
    sorted_indices: Optional[Tensor]
    unsorted_indices: Optional[Tensor]

# NOTE(review): untyped helper stub; presumably applies ``fn`` to ``optional``
# when it is not None — confirm against the runtime implementation.
def bind(optional: Any, fn: Any): ...

# Generic self-type used by the fluent conversion methods on PackedSequence below.
_T = TypeVar("_T")

class PackedSequence(PackedSequence_):
    """Typed stub for a packed batch of variable-length sequences.

    Every conversion helper is annotated with the ``_T`` self-type pattern so
    that it returns a value of the caller's own (sub)class.
    """

    def __new__(
        cls,
        data: Tensor,
        batch_sizes: Optional[Tensor] = ...,
        sorted_indices: Optional[Tensor] = ...,
        unsorted_indices: Optional[Tensor] = ...,
    ) -> Self: ...

    # Memory / device movement helpers.
    def pin_memory(self: _T) -> _T: ...
    def cuda(self: _T, *args: Any, **kwargs: Any) -> _T: ...
    def cpu(self: _T) -> _T: ...

    # Dtype conversion helpers.
    def double(self: _T) -> _T: ...
    def float(self: _T) -> _T: ...
    def half(self: _T) -> _T: ...
    def long(self: _T) -> _T: ...
    def int(self: _T) -> _T: ...
    def short(self: _T) -> _T: ...
    def char(self: _T) -> _T: ...
    def byte(self: _T) -> _T: ...

    # ``Tensor.to``-style overloads: dtype-only, device-and-dtype,
    # and "match this other tensor".
    @overload
    def to(
        self: _T, dtype: _dtype, non_blocking: bool = False, copy: bool = False
    ) -> _T: ...
    @overload
    def to(
        self: _T,
        device: Optional[DeviceLikeType] = None,
        dtype: Optional[_dtype] = None,
        non_blocking: bool = False,
        copy: bool = False,
    ) -> _T: ...
    @overload
    def to(
        self: _T, other: Tensor, non_blocking: bool = False, copy: bool = False
    ) -> _T: ...

    @property
    def is_cuda(self) -> bool: ...
    def is_pinned(self) -> bool: ...

# NOTE(review): stub carries no return annotation; presumably returns the
# inverse permutation tensor (or None for a None input) — confirm at runtime.
def invert_permutation(permutation: Optional[Tensor]): ...
def pack_padded_sequence(
    input: Tensor,
    lengths: Tensor,
    batch_first: bool = ...,
    enforce_sorted: bool = ...,
) -> PackedSequence:
    """Stub signature: packs a padded batch tensor into a ``PackedSequence``."""
    ...
def pad_packed_sequence(
    sequence: PackedSequence,
    batch_first: bool = ...,
    padding_value: float = ...,
    total_length: Optional[int] = ...,
) -> Tuple[Tensor, ...]:
    """Stub signature: the inverse of ``pack_padded_sequence`` — yields a
    tuple of tensors."""
    ...
def pad_sequence(
    sequences: Union[Tensor, Iterable[Tensor]],
    batch_first: bool = False,
    padding_value: float = ...,
) -> Tensor:
    """Stub signature: pads a collection of tensors into a single tensor."""
    ...
def pack_sequence(
    sequences: Sequence[Tensor],
    enforce_sorted: bool = ...,
) -> PackedSequence:
    """Stub signature: packs a sequence of variable-length tensors."""
    ...
def get_packed_sequence(
    data: Tensor,
    batch_sizes: Optional[Tensor],
    sorted_indices: Optional[Tensor],
    unsorted_indices: Optional[Tensor],
) -> PackedSequence:
    """Stub signature: builds a ``PackedSequence`` from its four raw fields
    (mirrors the ``PackedSequence_`` layout)."""
    ...