text
stringlengths
1
93.6k
else:
assert hasattr(self.cts_dist, "icdf")
cdf_min = self.cts_dist.cdf(torch.zeros_like(self.cts_dist.mean) - 1)
cdf_max = self.cts_dist.cdf(torch.ones_like(cdf_min))
u = Uniform(cdf_min, cdf_max, validate_args=False).sample(sample_shape)
cts_samp = self.cts_dist.icdf(u)
return quantize(cts_samp, self.num_bins)
class GMM(MixtureSameFamily):
    """Gaussian mixture model assembled from per-component parameters.

    The mixture weights are given as unnormalized logits; each component is a
    Normal with the corresponding mean and standard deviation.
    """

    def __init__(self, mix_wt_logits, means, std_devs):
        # Build the categorical mixing distribution and the Normal components
        # directly inside the parent constructor call; validation is disabled
        # throughout, matching the rest of the file.
        super().__init__(
            torch_Categorical(logits=mix_wt_logits, validate_args=False),
            Normal(means, std_devs, validate_args=False),
            validate_args=False,
        )
class DiscretizedGMM(DiscretizedCtsDistribution):
    """Discretized Gaussian mixture whose parameters arrive packed in one tensor.

    The last axis of ``params`` is split into three equal chunks holding, in
    order: mixture-weight logits, component means, and component standard
    deviations (interpreted as log std devs when ``log_dev`` is True).
    """

    def __init__(self, params, num_bins, clip=False, min_std_dev=1e-3, max_std_dev=10, min_prob=1e-5, log_dev=True):
        assert params.size(-1) % 3 == 0
        # A negative min_std_dev is a sentinel meaning "derive the floor from
        # the bin width" (one fifth of a bin).
        if min_std_dev < 0:
            min_std_dev = 1.0 / (num_bins * 5)
        logits, mu, sigma = params.chunk(3, -1)
        if log_dev:
            # Parameters carry log std devs; exponentiate (safely) first.
            sigma = safe_exp(sigma)
        sigma = sigma.clamp(min=min_std_dev, max=max_std_dev)
        super().__init__(
            cts_dist=GMM(logits, mu, sigma),
            num_bins=num_bins,
            device=params.device,
            batch_dims=params.ndim - 1,
            clip=clip,
            min_prob=min_prob,
        )
class DiscretizedNormal(DiscretizedCtsDistribution):
    """Discretized univariate Normal with (mean, std dev) packed in ``params``.

    The last axis of ``params`` must have size 2: entry 0 is the mean, entry 1
    the standard deviation (a log std dev when ``log_dev`` is True).
    """

    def __init__(self, params, num_bins, clip=False, min_std_dev=1e-3, max_std_dev=10, min_prob=1e-5, log_dev=True):
        assert params.size(-1) == 2
        # Negative min_std_dev selects a bin-width-based floor instead of a
        # fixed one (one fifth of a bin).
        if min_std_dev < 0:
            min_std_dev = 1.0 / (num_bins * 5)
        # unbind(-1) drops the size-2 trailing axis in one step, replacing the
        # original split(1, -1) followed by squeeze(-1) on each piece.
        loc, scale = params.unbind(-1)
        if log_dev:
            scale = safe_exp(scale)
        scale = scale.clamp(min=min_std_dev, max=max_std_dev)
        super().__init__(
            cts_dist=Normal(loc, scale, validate_args=False),
            num_bins=num_bins,
            device=params.device,
            batch_dims=params.ndim - 1,
            clip=clip,
            min_prob=min_prob,
        )
class Bernoulli(DiscreteDistribution):
    """Two-class discrete distribution driven by Bernoulli logits."""

    def __init__(self, logits):
        self.bernoulli = torch_Bernoulli(logits=logits, validate_args=False)

    @functools.cached_property
    def probs(self):
        # Expose a trailing class axis: [..., 0] = P(x=0), [..., 1] = P(x=1).
        p_one = self.bernoulli.probs
        return torch.stack([1 - p_one, p_one], -1)

    @functools.cached_property
    def mode(self):
        return self.bernoulli.mode

    def log_prob(self, x):
        # The underlying torch Bernoulli expects float-valued outcomes.
        return self.bernoulli.log_prob(x.float())

    def sample(self, sample_shape=torch.Size([])):
        return self.bernoulli.sample(sample_shape)
class DiscretizedBernoulli(DiscretizedDistribution):
    """Bernoulli over a two-bin discretized space.

    Samples and modes are mapped from bin indices {0, 1} to float bin centers
    via ``idx_to_float``; ``log_prob`` maps floats back with ``float_to_idx``.
    """

    def __init__(self, logits):
        super().__init__(2, logits.device)
        self.bernoulli = torch_Bernoulli(logits=logits, validate_args=False)

    @functools.cached_property
    def probs(self):
        # Trailing class axis: [..., 0] = P(bin 0), [..., 1] = P(bin 1).
        p_one = self.bernoulli.probs
        return torch.stack([1 - p_one, p_one], -1)

    @functools.cached_property
    def mode(self):
        return idx_to_float(self.bernoulli.mode, 2)

    def log_prob(self, x):
        # Convert float bin centers back to {0, 1} indices for torch's Bernoulli.
        return self.bernoulli.log_prob(float_to_idx(x, 2).float())

    def sample(self, sample_shape=torch.Size([])):
        return idx_to_float(self.bernoulli.sample(sample_shape), 2)
class DeltaDistribution(CtsDistribution):
    # Point-mass ("delta") continuous distribution located at `mean`.
    # NOTE(review): the class body appears to continue beyond this excerpt;
    # only the constructor is documented here.
    def __init__(self, mean, clip_range=1.0):
        """Store the location of the point mass.

        A positive clip_range clips `mean` into [-clip_range, clip_range];
        clip_range <= 0 disables clipping entirely.
        """
        if clip_range > 0:
            mean = mean.clip(min=-clip_range, max=clip_range)
        self.mean = mean