from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all

__all__ = ["Exponential"]


class Exponential(ExponentialFamily):
    r"""
    Creates an Exponential distribution parameterized by :attr:`rate`.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Exponential(torch.tensor([1.0]))
        >>> m.sample()  # Exponential distributed with rate=1
        tensor([ 0.1046])

    Args:
        rate (float or Tensor): rate = 1 / scale of the distribution
    """
    arg_constraints = {"rate": constraints.positive}
    support = constraints.nonnegative
    has_rsample = True
    _mean_carrier_measure = 0

    @property
    def mean(self):
        return self.rate.reciprocal()

    @property
    def mode(self):
        return torch.zeros_like(self.rate)

    @property
    def stddev(self):
        return self.rate.reciprocal()

    @property
    def variance(self):
        return self.rate.pow(-2)

    def __init__(self, rate, validate_args=None):
        (self.rate,) = broadcast_all(rate)
        batch_shape = torch.Size() if isinstance(rate, Number) else self.rate.size()
        super().__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Exponential, _instance)
        batch_shape = torch.Size(batch_shape)
        new.rate = self.rate.expand(batch_shape)
        super(Exponential, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        # Draw from Exponential(1) in-place, then rescale by 1 / rate.
        return self.rate.new(shape).exponential_() / self.rate

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # log p(x) = log(rate) - rate * x
        return self.rate.log() - self.rate * value

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # F(x) = 1 - exp(-rate * x)
        return 1 - torch.exp(-self.rate * value)

    def icdf(self, value):
        # F^{-1}(p) = -log(1 - p) / rate; log1p is used for numerical stability.
        return -torch.log1p(-value) / self.rate

    def entropy(self):
        # H = 1 - log(rate)
        return 1.0 - torch.log(self.rate)

    @property
    def _natural_params(self):
        return (-self.rate,)

    def _log_normalizer(self, x):
        # Exponential-family form: eta = -rate, log normalizer A(eta) = -log(-eta).
        return -torch.log(-x)
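

# A minimal usage sketch (added for illustration; not part of the original
# module): it exercises sampling, log_prob, and the cdf/icdf round trip of the
# Exponential class above. Sampled values are non-deterministic.
if __name__ == "__main__":
    m = Exponential(torch.tensor([0.5, 1.0, 2.0]))
    x = m.rsample(torch.Size([4]))  # shape (4, 3): reparameterized draws
    print(m.log_prob(x))  # elementwise log density at the drawn points
    p = m.cdf(x)
    print(torch.allclose(m.icdf(p), x, atol=1e-5))  # icdf inverts cdf up to fp error
    print(m.mean, m.variance)  # 1 / rate and 1 / rate ** 2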