import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy.cluster.vq import kmeans2
from torch import einsum
from einops import rearrange
import torch.distributed as dist


class VectorQuantizer(nn.Module):
    """
    see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
    ____________________________________________
    Discretization bottleneck part of the VQ-VAE.
    Inputs:
    - n_e : number of embeddings
    - e_dim : dimension of embedding
    - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
    _____________________________________________
    """

    # NOTE: this class contains a bug regarding beta; see VectorQuantizer2 for
    # a fix and use legacy=False to apply that fix. VectorQuantizer2 can be
    # used wherever VectorQuantizer has been used before and is additionally
    # more efficient.
    def __init__(self, n_e, e_dim, beta):
        super(VectorQuantizer, self).__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
    def forward(self, z):
        """
        Inputs the output of the encoder network z and maps it to a discrete
        one-hot vector that is the index of the closest embedding vector e_j
        z (continuous) -> z_q (discrete)
        z.shape = (batch, channel, height, width)
        quantization pipeline:
            1. get encoder input (B,C,H,W)
            2. flatten input to (B*H*W,C)
        """
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.e_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.matmul(z_flattened, self.embedding.weight.t())

        ## could possibly replace this here
        # #\start...
        # find closest encodings
        min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
        min_encodings = torch.zeros(
            min_encoding_indices.shape[0], self.n_e).to(z)
        min_encodings.scatter_(1, min_encoding_indices, 1)

        # dtype min encodings: torch.float32
        # min_encodings shape: torch.Size([2048, 512])
        # min_encoding_indices.shape: torch.Size([2048, 1])

        # get quantized latent vectors
        z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
        # .........\end

        # with:
        # .........\start
        # min_encoding_indices = torch.argmin(d, dim=1)
        # z_q = self.embedding(min_encoding_indices)
        # ......\end......... (TODO)

        # compute loss for embedding
        loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
            torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # perplexity
        e_mean = torch.mean(min_encodings, dim=0)
        perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        # TODO: check for more easy handling with nn.Embedding
        min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
        min_encodings.scatter_(1, indices[:, None], 1)

        # get quantized latent vectors
        z_q = torch.matmul(min_encodings.float(), self.embedding.weight)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
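

# Example usage for VectorQuantizer (a minimal sketch, not part of the original
# module; the codebook size, embedding dim, and tensor shapes below are
# illustrative assumptions only):
def _example_vector_quantizer():
    quantizer = VectorQuantizer(n_e=512, e_dim=64, beta=0.25)
    z = torch.randn(2, 64, 16, 16)  # encoder output: (batch, channel, height, width)
    z_q, loss, (perplexity, min_encodings, indices) = quantizer(z)
    assert z_q.shape == z.shape  # quantized latents keep the input shape
    return z_q, loss, perplexity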


class GumbelQuantize(nn.Module):
    """
    credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)
    Gumbel Softmax trick quantizer
    Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
    https://arxiv.org/abs/1611.01144
    """

    def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True,
                 kl_weight=5e-4, temp_init=1.0, use_vqinterface=True,
                 remap=None, unknown_index="random"):
        super().__init__()

        self.embedding_dim = embedding_dim
        self.n_embed = n_embed

        self.straight_through = straight_through
        self.temperature = temp_init
        self.kl_weight = kl_weight

        self.proj = nn.Conv2d(num_hiddens, n_embed, 1)
        self.embed = nn.Embedding(n_embed, embedding_dim)

        self.use_vqinterface = use_vqinterface

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_embed
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z, temp=None, return_logits=False):
        # force hard = True when we are in eval mode, as we must quantize. actually, always true seems to work
        hard = self.straight_through if self.training else True
        temp = self.temperature if temp is None else temp

        logits = self.proj(z)
        if self.remap is not None:
            # continue only with used logits
            full_zeros = torch.zeros_like(logits)
            logits = logits[:, self.used, ...]

        soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)
        if self.remap is not None:
            # go back to all entries but unused set to zero
            full_zeros[:, self.used, ...] = soft_one_hot
            soft_one_hot = full_zeros
        z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)

        # + kl divergence to the prior loss
        qy = F.softmax(logits, dim=1)
        diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean()

        ind = soft_one_hot.argmax(dim=1)
        if self.remap is not None:
            ind = self.remap_to_used(ind)

        if self.use_vqinterface:
            if return_logits:
                return z_q, diff, (None, None, ind), logits
            return z_q, diff, (None, None, ind)
        return z_q, diff, ind
    def get_codebook_entry(self, indices, shape):
        b, h, w, c = shape
        assert b * h * w == indices.shape[0]
        indices = rearrange(indices, '(b h w) -> b h w', b=b, h=h, w=w)
        if self.remap is not None:
            indices = self.unmap_to_all(indices)
        one_hot = F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float()
        z_q = einsum('b n h w, n d -> b d h w', one_hot, self.embed.weight)
        return z_q
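

# Example usage for GumbelQuantize (a minimal sketch; num_hiddens, n_embed and
# the input shape are illustrative assumptions): the 1x1 conv projects the
# hidden channels onto n_embed logits, which are relaxed with Gumbel-Softmax.
def _example_gumbel_quantize():
    quantizer = GumbelQuantize(num_hiddens=128, embedding_dim=64, n_embed=512)
    z = torch.randn(2, 128, 16, 16)  # (batch, num_hiddens, height, width)
    z_q, kl_loss, (_, _, indices) = quantizer(z, temp=1.0)
    assert z_q.shape == (2, 64, 16, 16)  # channels become embedding_dim
    return z_q, kl_loss, indices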


class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: in VectorQuantizer the beta term was applied to the wrong term due
    # to a bug. Here the fixed loss is the default (legacy=False); pass
    # legacy=True to reproduce the old, buggy behaviour for backwards
    # compatibility.
    def __init__(self, n_e, e_dim, beta, legacy=False):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.re_embed = n_e
    def encode(self, z):
        B, T, _ = z.shape
        z_flattened = z.reshape(-1, self.e_dim)
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).reshape(z.shape)
        z_q = z_q.view_as(z)
        min_encoding_indices = min_encoding_indices.reshape(z.shape[:-1])
        return z_flattened, z_q, min_encoding_indices
    def forward(self, z, mask=None, temp=None, rescale_logits=False, return_logits=False):
        if mask is not None:
            assert mask.shape[:2] == z.shape[:2], (mask.shape, z.shape)
            assert mask.shape[-1] == 1, (mask.shape,)
            z = z * mask

        assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits == False, "Only for interface compatible with Gumbel"
        assert return_logits == False, "Only for interface compatible with Gumbel"

        # reshape z -> (batch, height, width, channel) and flatten
        # z = rearrange(z, 'b c h w -> b h w c').contiguous()
        assert z.shape[-1] == self.e_dim
        z_flattened = z.reshape(-1, self.e_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.matmul(z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
        # torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).reshape(z.shape)
        perplexity = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * (z_q.detach() - z) ** 2 + \
                (z_q - z.detach()) ** 2
        else:
            loss = (z_q.detach() - z) ** 2 + self.beta * \
                (z_q - z.detach()) ** 2

        # preserve gradients
        z_q = z + (z_q - z).detach()

        min_encoding_indices = min_encoding_indices.reshape(z.shape[:-1])

        if mask is not None:
            loss = (loss * mask).sum() / mask.sum() / self.e_dim
        else:
            loss = loss.mean()

        return z_q, loss, min_encoding_indices, perplexity
    def get_codebook_entry(self, indices, shape=None):
        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
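

# Example usage for VectorQuantizer2 (a minimal sketch with assumed shapes):
# unlike VectorQuantizer, this version expects channel-last input, e.g. a
# (batch, time, e_dim) sequence of latents.
def _example_vector_quantizer2():
    quantizer = VectorQuantizer2(n_e=512, e_dim=64, beta=0.25, legacy=False)
    z = torch.randn(2, 100, 64)  # (batch, time, e_dim)
    z_q, loss, indices, perplexity = quantizer(z)
    assert z_q.shape == z.shape and indices.shape == (2, 100)
    return z_q, loss, indices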


class VectorQuantizer4(nn.Module):
    def __init__(self, n_e, e_dim, beta, legacy=False, kmeans_reset_every=1000):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.re_embed = n_e

        self.reset_every = kmeans_reset_every
        self.reset_thres = 20
        self.z_buffer = []
        self.register_buffer('use_flag', torch.zeros(n_e))
        self.register_buffer('steps', torch.zeros(1))
    def encode(self, z):
        B, T, _ = z.shape
        z_flattened = z.reshape(-1, self.e_dim)
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).reshape(z.shape)
        z_q = z_q.view_as(z)
        min_encoding_indices = min_encoding_indices.reshape(z.shape[:-1])
        return z_flattened, z_q, min_encoding_indices
    def forward(self, z, mask=None, temp=None, rescale_logits=False, return_logits=False):
        if mask is not None:
            assert mask.shape[:2] == z.shape[:2], (mask.shape, z.shape)
            assert mask.shape[-1] == 1, (mask.shape,)
            z = z * mask

        assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits == False, "Only for interface compatible with Gumbel"
        assert return_logits == False, "Only for interface compatible with Gumbel"

        # reshape z -> (batch, height, width, channel) and flatten
        # z = rearrange(z, 'b c h w -> b h w c').contiguous()
        assert z.shape[-1] == self.e_dim
        z_flattened = z.reshape(-1, self.e_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).reshape(z.shape)
        perplexity = None

        if self.training:
            self.steps += 1
            self.use_flag += torch.bincount(min_encoding_indices, minlength=self.n_e)
            is_master = not dist.is_initialized() or dist.get_rank() == 0

            if self.reset_every - 100 <= self.steps <= self.reset_every:
                if dist.is_initialized():
                    z_buffer_ = [None for _ in range(dist.get_world_size())]
                    dist.all_gather_object(z_buffer_, z_flattened.detach().cpu())
                else:
                    z_buffer_ = [z_flattened.detach().cpu()]
                self.z_buffer += z_buffer_

            if self.steps % self.reset_every == 0:
                if dist.is_initialized():
                    dist.all_reduce(self.use_flag)
                vq_usage = (self.use_flag > self.reset_thres).sum().item() / self.use_flag.shape[0]
                print("| VQ usage: ", vq_usage)
                if vq_usage != 1:
                    if is_master:
                        if self.steps.item() == self.reset_every:
                            print('| running kmeans in VQVAE')  # data driven initialization for the embeddings
                            z_buffer = torch.cat(self.z_buffer, 0)
                            rp = torch.randperm(z_buffer.shape[0])
                            kd = kmeans2(z_buffer[rp].numpy(), self.n_e, minit='points')[0]
                            self.embedding.weight.data = torch.from_numpy(kd).to(z.device)
                        else:
                            reset_ids = self.use_flag < self.reset_thres
                            keep_ids = self.use_flag >= self.reset_thres
                            t = torch.randint(0, keep_ids.sum(), [reset_ids.sum()], device=self.use_flag.device)
                            keep_ids = torch.where(keep_ids)[0][t]
                            self.embedding.weight.data[reset_ids] = self.embedding.weight.data[keep_ids].clone()
                    if dist.is_initialized():
                        dist.broadcast(self.embedding.weight.data, 0)
                    # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
                    d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
                        torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
                        torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
                    min_encoding_indices = torch.argmin(d, dim=1)
                    z_q = self.embedding(min_encoding_indices).reshape(z.shape)
                self.use_flag.fill_(0)
                self.z_buffer = []

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * (z_q.detach() - z) ** 2 + \
                (z_q - z.detach()) ** 2
        else:
            loss = (z_q.detach() - z) ** 2 + self.beta * \
                (z_q - z.detach()) ** 2

        # preserve gradients
        z_q = z + (z_q - z).detach()

        min_encoding_indices = min_encoding_indices.reshape(z.shape[:-1])

        if mask is not None:
            loss = (loss * mask).sum() / mask.sum() / self.e_dim
        else:
            loss = loss.mean()

        return z_q, loss, min_encoding_indices, perplexity
    def get_codebook_entry(self, indices, shape=None):
        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
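

# Example usage for VectorQuantizer4 (a minimal sketch; the shapes and codebook
# size are assumptions): behaves like VectorQuantizer2 at inference time, and
# additionally re-initializes under-used codes with k-means during training.
def _example_vector_quantizer4():
    quantizer = VectorQuantizer4(n_e=512, e_dim=64, beta=0.25, kmeans_reset_every=1000)
    quantizer.eval()  # skip the k-means / usage bookkeeping in this sketch
    z = torch.randn(2, 100, 64)  # (batch, time, e_dim)
    z_q, loss, indices, _ = quantizer(z)
    return z_q, loss, indices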