diff --git a/motion_diffusion_model/data_loaders/a2m/dataset.py b/motion_diffusion_model/data_loaders/a2m/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b34ce34362f98f3d0158cb0ca2912df35f512bd5 --- /dev/null +++ b/motion_diffusion_model/data_loaders/a2m/dataset.py @@ -0,0 +1,255 @@ +import random + +import numpy as np +import torch +# from utils.action_label_to_idx import action_label_to_idx +from data_loaders.tensors import collate +from utils.misc import to_torch +import utils.rotation_conversions as geometry + +class Dataset(torch.utils.data.Dataset): + def __init__(self, num_frames=1, sampling="conseq", sampling_step=1, split="train", + pose_rep="rot6d", translation=True, glob=True, max_len=-1, min_len=-1, num_seq_max=-1, **kwargs): + self.num_frames = num_frames + self.sampling = sampling + self.sampling_step = sampling_step + self.split = split + self.pose_rep = pose_rep + self.translation = translation + self.glob = glob + self.max_len = max_len + self.min_len = min_len + self.num_seq_max = num_seq_max + + self.align_pose_frontview = kwargs.get('align_pose_frontview', False) + self.use_action_cat_as_text_labels = kwargs.get('use_action_cat_as_text_labels', False) + self.only_60_classes = kwargs.get('only_60_classes', False) + self.leave_out_15_classes = kwargs.get('leave_out_15_classes', False) + self.use_only_15_classes = kwargs.get('use_only_15_classes', False) + + if self.split not in ["train", "val", "test"]: + raise ValueError(f"{self.split} is not a valid split") + + super().__init__() + + # to remove shuffling + self._original_train = None + self._original_test = None + + def action_to_label(self, action): + return self._action_to_label[action] + + def label_to_action(self, label): + import numbers + if isinstance(label, numbers.Integral): + return self._label_to_action[label] + else: # if it is one hot vector + label = np.argmax(label) + return self._label_to_action[label] + + def get_pose_data(self, data_index, frame_ix): + pose = self._load(data_index, frame_ix) + label = self.get_label(data_index) + return pose, label + + def get_label(self, ind): + action = self.get_action(ind) + return self.action_to_label(action) + + def get_action(self, ind): + return self._actions[ind] + + def action_to_action_name(self, action): + return self._action_classes[action] + + def action_name_to_action(self, action_name): + # self._action_classes is either a list or a dictionary. 
If it's a dictionary, we 1st convert it to a list + all_action_names = self._action_classes + if isinstance(all_action_names, dict): + all_action_names = list(all_action_names.values()) + assert list(self._action_classes.keys()) == list(range(len(all_action_names))) # the keys should be ordered from 0 to num_actions + + sorter = np.argsort(all_action_names) + actions = sorter[np.searchsorted(all_action_names, action_name, sorter=sorter)] + return actions + + def __getitem__(self, index): + if self.split == 'train': + data_index = self._train[index] + else: + data_index = self._test[index] + + # inp, target = self._get_item_data_index(data_index) + # return inp, target + return self._get_item_data_index(data_index) + + def _load(self, ind, frame_ix): + pose_rep = self.pose_rep + if pose_rep == "xyz" or self.translation: + if getattr(self, "_load_joints3D", None) is not None: + # Locate the root joint of initial pose at origin + joints3D = self._load_joints3D(ind, frame_ix) + joints3D = joints3D - joints3D[0, 0, :] + ret = to_torch(joints3D) + if self.translation: + ret_tr = ret[:, 0, :] + else: + if pose_rep == "xyz": + raise ValueError("This representation is not possible.") + if getattr(self, "_load_translation") is None: + raise ValueError("Can't extract translations.") + ret_tr = self._load_translation(ind, frame_ix) + ret_tr = to_torch(ret_tr - ret_tr[0]) + + if pose_rep != "xyz": + if getattr(self, "_load_rotvec", None) is None: + raise ValueError("This representation is not possible.") + else: + pose = self._load_rotvec(ind, frame_ix) + if not self.glob: + pose = pose[:, 1:, :] + pose = to_torch(pose) + if self.align_pose_frontview: + first_frame_root_pose_matrix = geometry.axis_angle_to_matrix(pose[0][0]) + all_root_poses_matrix = geometry.axis_angle_to_matrix(pose[:, 0, :]) + aligned_root_poses_matrix = torch.matmul(torch.transpose(first_frame_root_pose_matrix, 0, 1), + all_root_poses_matrix) + pose[:, 0, :] = geometry.matrix_to_axis_angle(aligned_root_poses_matrix) + + if self.translation: + ret_tr = torch.matmul(torch.transpose(first_frame_root_pose_matrix, 0, 1).float(), + torch.transpose(ret_tr, 0, 1)) + ret_tr = torch.transpose(ret_tr, 0, 1) + + if pose_rep == "rotvec": + ret = pose + elif pose_rep == "rotmat": + ret = geometry.axis_angle_to_matrix(pose).view(*pose.shape[:2], 9) + elif pose_rep == "rotquat": + ret = geometry.axis_angle_to_quaternion(pose) + elif pose_rep == "rot6d": + ret = geometry.matrix_to_rotation_6d(geometry.axis_angle_to_matrix(pose)) + if pose_rep != "xyz" and self.translation: + padded_tr = torch.zeros((ret.shape[0], ret.shape[2]), dtype=ret.dtype) + padded_tr[:, :3] = ret_tr + ret = torch.cat((ret, padded_tr[:, None]), 1) + ret = ret.permute(1, 2, 0).contiguous() + return ret.float() + + def _get_item_data_index(self, data_index): + nframes = self._num_frames_in_video[data_index] + + if self.num_frames == -1 and (self.max_len == -1 or nframes <= self.max_len): + frame_ix = np.arange(nframes) + else: + if self.num_frames == -2: + if self.min_len <= 0: + raise ValueError("You should put a min_len > 0 for num_frames == -2 mode") + if self.max_len != -1: + max_frame = min(nframes, self.max_len) + else: + max_frame = nframes + + num_frames = random.randint(self.min_len, max(max_frame, self.min_len)) + else: + num_frames = self.num_frames if self.num_frames != -1 else self.max_len + + if num_frames > nframes: + fair = False # True + if fair: + # distills redundancy everywhere + choices = np.random.choice(range(nframes), + num_frames, + replace=True) + 
frame_ix = sorted(choices) + else: + # adding the last frame until done + ntoadd = max(0, num_frames - nframes) + lastframe = nframes - 1 + padding = lastframe * np.ones(ntoadd, dtype=int) + frame_ix = np.concatenate((np.arange(0, nframes), + padding)) + + elif self.sampling in ["conseq", "random_conseq"]: + step_max = (nframes - 1) // (num_frames - 1) + if self.sampling == "conseq": + if self.sampling_step == -1 or self.sampling_step * (num_frames - 1) >= nframes: + step = step_max + else: + step = self.sampling_step + elif self.sampling == "random_conseq": + step = random.randint(1, step_max) + + lastone = step * (num_frames - 1) + shift_max = nframes - lastone - 1 + shift = random.randint(0, max(0, shift_max - 1)) + frame_ix = shift + np.arange(0, lastone + 1, step) + + elif self.sampling == "random": + choices = np.random.choice(range(nframes), + num_frames, + replace=False) + frame_ix = sorted(choices) + + else: + raise ValueError("Sampling not recognized.") + + inp, action = self.get_pose_data(data_index, frame_ix) + + + output = {'inp': inp, 'action': action} + + if hasattr(self, '_actions') and hasattr(self, '_action_classes'): + output['action_text'] = self.action_to_action_name(self.get_action(data_index)) + + return output + + + def get_mean_length_label(self, label): + if self.num_frames != -1: + return self.num_frames + + if self.split == 'train': + index = self._train + else: + index = self._test + + action = self.label_to_action(label) + choices = np.argwhere(self._actions[index] == action).squeeze(1) + lengths = self._num_frames_in_video[np.array(index)[choices]] + + if self.max_len == -1: + return np.mean(lengths) + else: + # make the lengths less than max_len + lengths[lengths > self.max_len] = self.max_len + return np.mean(lengths) + + def __len__(self): + num_seq_max = getattr(self, "num_seq_max", -1) + if num_seq_max == -1: + from math import inf + num_seq_max = inf + + if self.split == 'train': + return min(len(self._train), num_seq_max) + else: + return min(len(self._test), num_seq_max) + + def shuffle(self): + if self.split == 'train': + random.shuffle(self._train) + else: + random.shuffle(self._test) + + def reset_shuffle(self): + if self.split == 'train': + if self._original_train is None: + self._original_train = self._train + else: + self._train = self._original_train + else: + if self._original_test is None: + self._original_test = self._test + else: + self._test = self._original_test diff --git a/motion_diffusion_model/data_loaders/a2m/humanact12poses.py b/motion_diffusion_model/data_loaders/a2m/humanact12poses.py new file mode 100644 index 0000000000000000000000000000000000000000..d9b8894a5e7435f0f35aee1d326fead5a3123bae --- /dev/null +++ b/motion_diffusion_model/data_loaders/a2m/humanact12poses.py @@ -0,0 +1,57 @@ +import pickle as pkl +import numpy as np +import os +from .dataset import Dataset + + +class HumanAct12Poses(Dataset): + dataname = "humanact12" + + def __init__(self, datapath="dataset/HumanAct12Poses", split="train", **kargs): + self.datapath = datapath + + super().__init__(**kargs) + + pkldatafilepath = os.path.join(datapath, "humanact12poses.pkl") + data = pkl.load(open(pkldatafilepath, "rb")) + + self._pose = [x for x in data["poses"]] + self._num_frames_in_video = [p.shape[0] for p in self._pose] + self._joints = [x for x in data["joints3D"]] + + self._actions = [x for x in data["y"]] + + total_num_actions = 12 + self.num_actions = total_num_actions + + self._train = list(range(len(self._pose))) + + keep_actions = np.arange(0, 
total_num_actions) + + self._action_to_label = {x: i for i, x in enumerate(keep_actions)} + self._label_to_action = {i: x for i, x in enumerate(keep_actions)} + + self._action_classes = humanact12_coarse_action_enumerator + + def _load_joints3D(self, ind, frame_ix): + return self._joints[ind][frame_ix] + + def _load_rotvec(self, ind, frame_ix): + pose = self._pose[ind][frame_ix].reshape(-1, 24, 3) + return pose + + +humanact12_coarse_action_enumerator = { + 0: "warm_up", + 1: "walk", + 2: "run", + 3: "jump", + 4: "drink", + 5: "lift_dumbbell", + 6: "sit", + 7: "eat", + 8: "turn steering wheel", + 9: "phone", + 10: "boxing", + 11: "throw", +} diff --git a/motion_diffusion_model/data_loaders/a2m/uestc.py b/motion_diffusion_model/data_loaders/a2m/uestc.py new file mode 100644 index 0000000000000000000000000000000000000000..e818b9831f587b360cf90f134074855ee5100484 --- /dev/null +++ b/motion_diffusion_model/data_loaders/a2m/uestc.py @@ -0,0 +1,226 @@ +import os +from tqdm import tqdm +import numpy as np +import pickle as pkl +import utils.rotation_conversions as geometry +import torch + +from .dataset import Dataset +# from torch.utils.data import Dataset + +action2motion_joints = [8, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 12, 13, 14, 21, 24, 38] + + +def get_z(cam_s, cam_pos, joints, img_size, flength): + """ + Solves for the depth offset of the model to approx. orth with persp camera. + """ + # Translate the model itself: Solve the best z that maps to orth_proj points + joints_orth_target = (cam_s * (joints[:, :2] + cam_pos) + 1) * 0.5 * img_size + height3d = np.linalg.norm(np.max(joints[:, :2], axis=0) - np.min(joints[:, :2], axis=0)) + height2d = np.linalg.norm(np.max(joints_orth_target, axis=0) - np.min(joints_orth_target, axis=0)) + tz = np.array(flength * (height3d / height2d)) + return float(tz) + + +def get_trans_from_vibe(vibe, index, use_z=True): + alltrans = [] + for t in range(vibe["joints3d"][index].shape[0]): + # Convert crop cam to orig cam + # No need! Because `convert_crop_cam_to_orig_img` from demoutils of vibe + # does this already for us :) + # Its format is: [sx, sy, tx, ty] + cam_orig = vibe["orig_cam"][index][t] + x = cam_orig[2] + y = cam_orig[3] + if use_z: + z = get_z(cam_s=cam_orig[0], # TODO: There are two scales instead of 1. 
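+                          # orig_cam is [sx, sy, tx, ty] (see above); only the first scale (sx) is passed to get_z here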
+ cam_pos=cam_orig[2:4], + joints=vibe['joints3d'][index][t], + img_size=540, + flength=500) + # z = 500 / (0.5 * 480 * cam_orig[0]) + else: + z = 0 + trans = [x, y, z] + alltrans.append(trans) + alltrans = np.array(alltrans) + return alltrans - alltrans[0] + + +class UESTC(Dataset): + dataname = "uestc" + + def __init__(self, datapath="dataset/uestc", method_name="vibe", view="all", **kargs): + + self.datapath = datapath + self.method_name = method_name + self.view = view + super().__init__(**kargs) + + # Load pre-computed #frames data + with open(os.path.join(datapath, 'info', 'num_frames_min.txt'), 'r') as f: + num_frames_video = np.asarray([int(s) for s in f.read().splitlines()]) + + # Out of 118 subjects -> 51 training, 67 in test + all_subjects = np.arange(1, 119) + self._tr_subjects = [ + 1, 2, 6, 12, 13, 16, 21, 24, 28, 29, 30, 31, 33, 35, 39, 41, 42, 45, 47, 50, + 52, 54, 55, 57, 59, 61, 63, 64, 67, 69, 70, 71, 73, 77, 81, 84, 86, 87, 88, + 90, 91, 93, 96, 99, 102, 103, 104, 107, 108, 112, 113] + self._test_subjects = [s for s in all_subjects if s not in self._tr_subjects] + + # Load names of 25600 videos + with open(os.path.join(datapath, 'info', 'names.txt'), 'r') as f: + videos = f.read().splitlines() + + self._videos = videos + + if self.method_name == "vibe": + vibe_data_path = os.path.join(datapath, "vibe_cache_refined.pkl") + vibe_data = pkl.load(open(vibe_data_path, "rb")) + + self._pose = vibe_data["pose"] + num_frames_method = [p.shape[0] for p in self._pose] + globpath = os.path.join(datapath, "globtrans_usez.pkl") + + if os.path.exists(globpath): + self._globtrans = pkl.load(open(globpath, "rb")) + else: + self._globtrans = [] + for index in tqdm(range(len(self._pose))): + self._globtrans.append(get_trans_from_vibe(vibe_data, index, use_z=True)) + pkl.dump(self._globtrans, open("globtrans_usez.pkl", "wb")) + self._joints = vibe_data["joints3d"] + self._jointsIx = action2motion_joints + else: + raise ValueError("This method name is not recognized.") + + num_frames_video = np.minimum(num_frames_video, num_frames_method) + num_frames_video = num_frames_video.astype(int) + self._num_frames_in_video = [x for x in num_frames_video] + + N = len(videos) + self._actions = np.zeros(N, dtype=int) + for ind in range(N): + self._actions[ind] = self.parse_action(videos[ind]) + + self._actions = [x for x in self._actions] + + total_num_actions = 40 + self.num_actions = total_num_actions + keep_actions = np.arange(0, total_num_actions) + + self._action_to_label = {x: i for i, x in enumerate(keep_actions)} + self._label_to_action = {i: x for i, x in enumerate(keep_actions)} + self.num_classes = len(keep_actions) + + self._train = [] + self._test = [] + + self.info_actions = [] + + def get_rotation(view): + theta = - view * np.pi/4 + axis = torch.tensor([0, 1, 0], dtype=torch.float) + axisangle = theta*axis + matrix = geometry.axis_angle_to_matrix(axisangle) + return matrix + + # 0 is identity if needed + rotations = {key: get_rotation(key) for key in [0, 1, 2, 3, 4, 5, 6, 7]} + + for index, video in enumerate(tqdm(videos, desc='Preparing UESTC data..')): + act, view, subject, side = self._get_action_view_subject_side(video) + self.info_actions.append({"action": act, + "view": view, + "subject": subject, + "side": side}) + if self.view == "frontview": + if side != 1: + continue + # rotate to front view + if side != 1: + # don't take the view 8 in side 2 + if view == 8: + continue + rotation = rotations[view] + global_matrix = 
geometry.axis_angle_to_matrix(torch.from_numpy(self._pose[index][:, :3])) + # rotate the global pose + self._pose[index][:, :3] = geometry.matrix_to_axis_angle(rotation @ global_matrix).numpy() + # rotate the joints + self._joints[index] = self._joints[index] @ rotation.T.numpy() + self._globtrans[index] = (self._globtrans[index] @ rotation.T.numpy()) + + # add the global translation to the joints + self._joints[index] = self._joints[index] + self._globtrans[index][:, None] + + if subject in self._tr_subjects: + self._train.append(index) + elif subject in self._test_subjects: + self._test.append(index) + else: + raise ValueError("This subject doesn't belong to any set.") + + # if index > 200: + # break + + # Select only sequences which have a minimum number of frames + if self.num_frames > 0: + threshold = self.num_frames*3/4 + else: + threshold = 0 + + method_extracted_ix = np.where(num_frames_video >= threshold)[0].tolist() + self._train = list(set(self._train) & set(method_extracted_ix)) + # keep the test set without modification + self._test = list(set(self._test)) + + action_classes_file = os.path.join(datapath, "info/action_classes.txt") + with open(action_classes_file, 'r') as f: + self._action_classes = np.array(f.read().splitlines()) + + # with open(processd_path, 'wb') as file: + # pkl.dump(xxx, file) + + def _load_joints3D(self, ind, frame_ix): + if len(self._joints[ind]) == 0: + raise ValueError( + f"Cannot load index {ind} in _load_joints3D function.") + if self._jointsIx is not None: + joints3D = self._joints[ind][frame_ix][:, self._jointsIx] + else: + joints3D = self._joints[ind][frame_ix] + + return joints3D + + def _load_rotvec(self, ind, frame_ix): + # 72 dim smpl + pose = self._pose[ind][frame_ix, :].reshape(-1, 24, 3) + return pose + + def _get_action_view_subject_side(self, videopath): + # TODO: Can be moved to tools.py + spl = videopath.split('_') + action = int(spl[0][1:]) + view = int(spl[1][1:]) + subject = int(spl[2][1:]) + side = int(spl[3][1:]) + return action, view, subject, side + + def _get_videopath(self, action, view, subject, side): + # Unused function + return 'a{:d}_d{:d}_p{:03d}_c{:d}_color.avi'.format( + action, view, subject, side) + + def parse_action(self, path, return_int=True): + # Override parent method + info, _, _, _ = self._get_action_view_subject_side(path) + if return_int: + return int(info) + else: + return info + + +if __name__ == "__main__": + dataset = UESTC() diff --git a/motion_diffusion_model/data_loaders/get_data.py b/motion_diffusion_model/data_loaders/get_data.py new file mode 100644 index 0000000000000000000000000000000000000000..b750183feb517b75f8cd9b6efb1a4fa160282c2c --- /dev/null +++ b/motion_diffusion_model/data_loaders/get_data.py @@ -0,0 +1,59 @@ +from torch.utils.data import DataLoader +from data_loaders.tensors import collate as all_collate +from data_loaders.tensors import t2m_collate, t2m_prefix_collate + +def get_dataset_class(name): + if name == "amass": + from .amass import AMASS + return AMASS + elif name == "uestc": + from .a2m.uestc import UESTC + return UESTC + elif name == "humanact12": + from .a2m.humanact12poses import HumanAct12Poses + return HumanAct12Poses + elif name == "humanml": + from data_loaders.humanml.data.dataset import HumanML3D + return HumanML3D + elif name == "kit": + from data_loaders.humanml.data.dataset import KIT + return KIT + else: + raise ValueError(f'Unsupported dataset name [{name}]') + +def get_collate_fn(name, hml_mode='train', pred_len=0, batch_size=1): + if hml_mode == 'gt': + from 
data_loaders.humanml.data.dataset import collate_fn as t2m_eval_collate + return t2m_eval_collate + if name in ["humanml", "kit"]: + if pred_len > 0: + return lambda x: t2m_prefix_collate(x, pred_len=pred_len) + return lambda x: t2m_collate(x, batch_size) + else: + return all_collate + + +def get_dataset(name, num_frames, split='train', hml_mode='train', abs_path='.', fixed_len=0, + device=None, autoregressive=False, cache_path=None): + DATA = get_dataset_class(name) + if name in ["humanml", "kit"]: + dataset = DATA(split=split, num_frames=num_frames, mode=hml_mode, abs_path=abs_path, fixed_len=fixed_len, + device=device, autoregressive=autoregressive) + else: + dataset = DATA(split=split, num_frames=num_frames) + return dataset + + +def get_dataset_loader(name, batch_size, num_frames, split='train', hml_mode='train', fixed_len=0, pred_len=0, + device=None, autoregressive=False): + dataset = get_dataset(name, num_frames, split=split, hml_mode=hml_mode, fixed_len=fixed_len, + device=device, autoregressive=autoregressive) + + collate = get_collate_fn(name, hml_mode, pred_len, batch_size) + + loader = DataLoader( + dataset, batch_size=batch_size, shuffle=True, + num_workers=8, drop_last=True, collate_fn=collate + ) + + return loader \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/README.md b/motion_diffusion_model/data_loaders/humanml/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4bf224f6b341e21f549a27a000d8400c4909c6c1 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/README.md @@ -0,0 +1 @@ +This code is based on https://github.com/EricGuo5513/text-to-motion.git \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/common/quaternion.py b/motion_diffusion_model/data_loaders/humanml/common/quaternion.py new file mode 100644 index 0000000000000000000000000000000000000000..1e414a63a790b0b3b78d734b89f0d08dac2e8378 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/common/quaternion.py @@ -0,0 +1,425 @@ +# Copyright (c) 2018-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import torch +import numpy as np + +_EPS4 = np.finfo(float).eps * 4.0 + +_FLOAT_EPS = np.finfo(float).eps + +# PyTorch-backed implementations +def qinv(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + mask = torch.ones_like(q) + mask[..., 1:] = -mask[..., 1:] + return q * mask + + +def qinv_np(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + return qinv(torch.from_numpy(q).float()).numpy() + + +def qnormalize(q): + assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)' + q[..., -1] += 1e-4 # Guy - for safty, avoid zero devision + return q / torch.norm(q, dim=-1, keepdim=True) + + +def qmul(q, r): + """ + Multiply quaternion(s) q with quaternion(s) r. + Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions. + Returns q*r as a tensor of shape (*, 4). 
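+    Quaternions are given with the real part first, i.e. (w, x, y, z).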
+ """ + assert q.shape[-1] == 4 + assert r.shape[-1] == 4 + + original_shape = q.shape + + # Compute outer product + # terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4)) + terms = torch.bmm(r.reshape(-1, 4, 1), q.reshape(-1, 1, 4)) + + w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3] + x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2] + y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1] + z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0] + return torch.stack((w, x, y, z), dim=1).view(original_shape) + + +def qrot(q, v): + """ + Rotate vector(s) v about the rotation described by quaternion(s) q. + Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, + where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). + """ + assert q.shape[-1] == 4 + assert v.shape[-1] == 3 + assert q.shape[:-1] == v.shape[:-1] + + original_shape = list(v.shape) + # print(q.shape) + q = q.contiguous().view(-1, 4) + v = v.contiguous().view(-1, 3) + + qvec = q[:, 1:].to(v.device) + uv = torch.cross(qvec, v, dim=1) + uuv = torch.cross(qvec, uv, dim=1) + return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape) + + +def qeuler(q, order, epsilon=0, deg=True): + """ + Convert quaternion(s) q to Euler angles. + Expects a tensor of shape (*, 4), where * denotes any number of dimensions. + Returns a tensor of shape (*, 3). + """ + assert q.shape[-1] == 4 + + original_shape = list(q.shape) + original_shape[-1] = 3 + q = q.view(-1, 4) + + q0 = q[:, 0] + q1 = q[:, 1] + q2 = q[:, 2] + q3 = q[:, 3] + + if order == 'xyz': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + elif order == 'yzx': + x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon)) + elif order == 'zxy': + x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'xzy': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3)) + z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon)) + elif order == 'yxz': + x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon)) + y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2)) + z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3)) + elif order == 'zyx': + x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2)) + y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon)) + z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3)) + else: + raise + + if deg: + return torch.stack((x, y, z), dim=1).view(original_shape) * 180 / np.pi + else: + return torch.stack((x, y, z), dim=1).view(original_shape) + + +# Numpy-backed implementations + +def qmul_np(q, r): + q = torch.from_numpy(q).contiguous().float() + r = torch.from_numpy(r).contiguous().float() + return qmul(q, r).numpy() + + +def qrot_np(q, v): + q = torch.from_numpy(q).contiguous().float() + v = 
torch.from_numpy(v).contiguous().float() + return qrot(q, v).numpy() + + +def qeuler_np(q, order, epsilon=0, use_gpu=False): + if use_gpu: + q = torch.from_numpy(q).cuda().float() + return qeuler(q, order, epsilon).cpu().numpy() + else: + q = torch.from_numpy(q).contiguous().float() + return qeuler(q, order, epsilon).numpy() + + +def qfix(q): + """ + Enforce quaternion continuity across the time dimension by selecting + the representation (q or -q) with minimal distance (or, equivalently, maximal dot product) + between two consecutive frames. + + Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints. + Returns a tensor of the same shape. + """ + assert len(q.shape) == 3 + assert q.shape[-1] == 4 + + result = q.copy() + dot_products = np.sum(q[1:] * q[:-1], axis=2) + mask = dot_products < 0 + mask = (np.cumsum(mask, axis=0) % 2).astype(bool) + result[1:][mask] *= -1 + return result + + +def euler2quat(e, order, deg=True): + """ + Convert Euler angles to quaternions. + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + + e = e.view(-1, 3) + + ## if euler angles in degrees + if deg: + e = e * np.pi / 180. + + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + + rx = torch.stack((torch.cos(x / 2), torch.sin(x / 2), torch.zeros_like(x), torch.zeros_like(x)), dim=1) + ry = torch.stack((torch.cos(y / 2), torch.zeros_like(y), torch.sin(y / 2), torch.zeros_like(y)), dim=1) + rz = torch.stack((torch.cos(z / 2), torch.zeros_like(z), torch.zeros_like(z), torch.sin(z / 2)), dim=1) + + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise + if result is None: + result = r + else: + result = qmul(result, r) + + # Reverse antipodal representation to have a non-negative "w" + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.view(original_shape) + + +def expmap_to_quaternion(e): + """ + Convert axis-angle rotations (aka exponential maps) to quaternions. + Stable formula from "Practical Parameterization of Rotations Using the Exponential Map". + Expects a tensor of shape (*, 3), where * denotes any number of dimensions. + Returns a tensor of shape (*, 4). + """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + e = e.reshape(-1, 3) + + theta = np.linalg.norm(e, axis=1).reshape(-1, 1) + w = np.cos(0.5 * theta).reshape(-1, 1) + xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * e + return np.concatenate((w, xyz), axis=1).reshape(original_shape) + + +def euler_to_quaternion(e, order): + """ + Convert Euler angles to quaternions. 
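+    NumPy analogue of euler2quat: expects angles in radians with shape (*, 3) and returns quaternions of shape (*, 4).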
+ """ + assert e.shape[-1] == 3 + + original_shape = list(e.shape) + original_shape[-1] = 4 + + e = e.reshape(-1, 3) + + x = e[:, 0] + y = e[:, 1] + z = e[:, 2] + + rx = np.stack((np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1) + ry = np.stack((np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1) + rz = np.stack((np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1) + + result = None + for coord in order: + if coord == 'x': + r = rx + elif coord == 'y': + r = ry + elif coord == 'z': + r = rz + else: + raise + if result is None: + result = r + else: + result = qmul_np(result, r) + + # Reverse antipodal representation to have a non-negative "w" + if order in ['xyz', 'yzx', 'zxy']: + result *= -1 + + return result.reshape(original_shape) + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def quaternion_to_matrix_np(quaternions): + q = torch.from_numpy(quaternions).contiguous().float() + return quaternion_to_matrix(q).numpy() + + +def quaternion_to_cont6d_np(quaternions): + rotation_mat = quaternion_to_matrix_np(quaternions) + cont_6d = np.concatenate([rotation_mat[..., 0], rotation_mat[..., 1]], axis=-1) + return cont_6d + + +def quaternion_to_cont6d(quaternions): + rotation_mat = quaternion_to_matrix(quaternions) + cont_6d = torch.cat([rotation_mat[..., 0], rotation_mat[..., 1]], dim=-1) + return cont_6d + + +def cont6d_to_matrix(cont6d): + assert cont6d.shape[-1] == 6, "The last dimension must be 6" + x_raw = cont6d[..., 0:3] + y_raw = cont6d[..., 3:6] + + x = x_raw / torch.norm(x_raw, dim=-1, keepdim=True) + z = torch.cross(x, y_raw, dim=-1) + z = z / torch.norm(z, dim=-1, keepdim=True) + + y = torch.cross(z, x, dim=-1) + + x = x[..., None] + y = y[..., None] + z = z[..., None] + + mat = torch.cat([x, y, z], dim=-1) + return mat + + +def cont6d_to_matrix_np(cont6d): + q = torch.from_numpy(cont6d).contiguous().float() + return cont6d_to_matrix(q).numpy() + + +def qpow(q0, t, dtype=torch.float): + ''' q0 : tensor of quaternions + t: tensor of powers + ''' + q0 = qnormalize(q0) + theta0 = torch.acos(q0[..., 0]) + + ## if theta0 is close to zero, add epsilon to avoid NaNs + mask = (theta0 <= 10e-10) * (theta0 >= -10e-10) + theta0 = (1 - mask) * theta0 + mask * 10e-10 + v0 = q0[..., 1:] / torch.sin(theta0).view(-1, 1) + + if isinstance(t, torch.Tensor): + q = torch.zeros(t.shape + q0.shape) + theta = t.view(-1, 1) * theta0.view(1, -1) + else: ## if t is a number + q = torch.zeros(q0.shape) + theta = t * theta0 + + q[..., 0] = torch.cos(theta) + q[..., 1:] = v0 * torch.sin(theta).unsqueeze(-1) + + return q.to(dtype) + + +def qslerp(q0, q1, t): + ''' + q0: starting quaternion + q1: ending quaternion + t: array of points along the way + + Returns: + Tensor of Slerps: t.shape + q0.shape + ''' + + q0 = qnormalize(q0) + q1 = qnormalize(q1) + q_ = qpow(qmul(q1, qinv(q0)), t) + 
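+    # slerp: q(t) = (q1 * q0^-1)^t * q0, evaluated for every point in t and broadcast against q0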
+ return qmul(q_, + q0.contiguous().view(torch.Size([1] * len(t.shape)) + q0.shape).expand(t.shape + q0.shape).contiguous()) + + +def qbetween(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v = torch.cross(v0, v1) + w = torch.sqrt((v0 ** 2).sum(dim=-1, keepdim=True) * (v1 ** 2).sum(dim=-1, keepdim=True)) + (v0 * v1).sum(dim=-1, + keepdim=True) + return qnormalize(torch.cat([w, v], dim=-1)) + + +def qbetween_np(v0, v1): + ''' + find the quaternion used to rotate v0 to v1 + ''' + assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)' + assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)' + + v0 = torch.from_numpy(v0).float() + v1 = torch.from_numpy(v1).float() + return qbetween(v0, v1).numpy() + + +def lerp(p0, p1, t): + if not isinstance(t, torch.Tensor): + t = torch.Tensor([t]) + + new_shape = t.shape + p0.shape + new_view_t = t.shape + torch.Size([1] * len(p0.shape)) + new_view_p = torch.Size([1] * len(t.shape)) + p0.shape + p0 = p0.view(new_view_p).expand(new_shape) + p1 = p1.view(new_view_p).expand(new_shape) + t = t.view(new_view_t).expand(new_shape) + + return p0 + t * (p1 - p0) diff --git a/motion_diffusion_model/data_loaders/humanml/common/skeleton.py b/motion_diffusion_model/data_loaders/humanml/common/skeleton.py new file mode 100644 index 0000000000000000000000000000000000000000..3e5a46ec14dc659e92f675ece60d115262c54449 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/common/skeleton.py @@ -0,0 +1,202 @@ +from data_loaders.humanml.common.quaternion import * +import scipy.ndimage.filters as filters + +class Skeleton(object): + def __init__(self, offset, kinematic_tree, device): + self.device = device + self._raw_offset_np = offset.numpy() + self._raw_offset = offset.clone().detach().to(device).float() + self._kinematic_tree = kinematic_tree + self._offset = None + self._parents = [0] * len(self._raw_offset) + self._parents[0] = -1 + for chain in self._kinematic_tree: + for j in range(1, len(chain)): + self._parents[chain[j]] = chain[j-1] + + def njoints(self): + return len(self._raw_offset) + + def offset(self): + return self._offset + + def set_offset(self, offsets): + self._offset = offsets.clone().detach().to(self.device).float() + + def kinematic_tree(self): + return self._kinematic_tree + + def parents(self): + return self._parents + + # joints (batch_size, joints_num, 3) + def get_offsets_joints_batch(self, joints): + assert len(joints.shape) == 3 + _offsets = self._raw_offset.expand(joints.shape[0], -1, -1).clone() + for i in range(1, self._raw_offset.shape[0]): + _offsets[:, i] = torch.norm(joints[:, i] - joints[:, self._parents[i]], p=2, dim=1)[:, None] * _offsets[:, i] + + self._offset = _offsets.detach() + return _offsets + + # joints (joints_num, 3) + def get_offsets_joints(self, joints): + assert len(joints.shape) == 2 + _offsets = self._raw_offset.clone() + for i in range(1, self._raw_offset.shape[0]): + # print(joints.shape) + _offsets[i] = torch.norm(joints[i] - joints[self._parents[i]], p=2, dim=0) * _offsets[i] + + self._offset = _offsets.detach() + return _offsets + + # face_joint_idx should follow the order of right hip, left hip, right shoulder, left shoulder + # joints (batch_size, joints_num, 3) + def inverse_kinematics_np(self, joints, face_joint_idx, smooth_forward=False, fix_bug=False): + assert len(face_joint_idx) == 4 + '''Get Forward Direction''' + if fix_bug: + r_hip, l_hip, sdr_r, 
sdr_l = face_joint_idx + else: + l_hip, r_hip, sdr_r, sdr_l = face_joint_idx + across1 = joints[:, r_hip] - joints[:, l_hip] + across2 = joints[:, sdr_r] - joints[:, sdr_l] + across = across1 + across2 + across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis] + # print(across1.shape, across2.shape) + + # forward (batch_size, 3) + forward = np.cross(np.array([[0, 1, 0]]), across, axis=-1) + if smooth_forward: + forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest') + # forward (batch_size, 3) + forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis] + + '''Get Root Rotation''' + target = np.array([[0,0,1]]).repeat(len(forward), axis=0) + root_quat = qbetween_np(forward, target) # angle from root to Z+ (= how much to rotate root such that it faces Z+) + + '''Inverse Kinematics''' + # quat_params (batch_size, joints_num, 4) + # print(joints.shape[:-1]) + quat_params = np.zeros(joints.shape[:-1] + (4,)) + # print(quat_params.shape) + # root_quat[0] = np.array([[1.0, 0.0, 0.0, 0.0]]) # this is a bug: the rotation of next joint in chain is computed wrt the root joint, which is now 0, but the next joint was not moved so it is like a huge rotation + quat_params[:, 0] = root_quat + # quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]]) + for chain in self._kinematic_tree: + R = root_quat + for j in range(len(chain) - 1): + # (batch, 3) + u = self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0) # rest-pose bone direction for joint j in the chain + # print(u.shape) + # (batch, 3) + v = joints[:, chain[j+1]] - joints[:, chain[j]] # data bone direction for joint j+1 in the chain + v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis] + # print(u.shape, v.shape) + rot_u_v = qbetween_np(u, v) # angle betweem rest-pose bone and data bone (bone is j to j+1) + + R_loc = qmul_np(qinv_np(R), rot_u_v) # bring angle to be local coordinate system, i.e., relative to the parent bone + + quat_params[:,chain[j + 1], :] = R_loc + R = qmul_np(R, R_loc) + + return quat_params + + # Be sure root joint is at the beginning of kinematic chains + def forward_kinematics(self, quat_params, root_pos, skel_joints=None, do_root_R=True): + # quat_params (batch_size, joints_num, 4) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(quat_params.shape[0], -1, -1) + joints = torch.zeros(quat_params.shape[:-1] + (3,)).to(self.device) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + R = quat_params[:, 0] + else: + R = torch.tensor([[1.0, 0.0, 0.0, 0.0]]).expand(len(quat_params), -1).detach().to(self.device) + for i in range(1, len(chain)): + R = qmul(R, quat_params[:, chain[i]]) + offset_vec = offsets[:, chain[i]] + joints[:, chain[i]] = qrot(R, offset_vec) + joints[:, chain[i-1]] + return joints + + # Be sure root joint is at the beginning of kinematic chains + def forward_kinematics_np(self, quat_params, root_pos, skel_joints=None, do_root_R=True): + # quat_params (batch_size, joints_num, 4) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(quat_params.shape[0], -1, -1) + offsets = offsets.numpy() + joints = np.zeros(quat_params.shape[:-1] + (3,)) + joints[:, 
0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + R = quat_params[:, 0] + else: + R = np.array([[1.0, 0.0, 0.0, 0.0]]).repeat(len(quat_params), axis=0) + for i in range(1, len(chain)): + R = qmul_np(R, quat_params[:, chain[i]]) + offset_vec = offsets[:, chain[i]] + joints[:, chain[i]] = qrot_np(R, offset_vec) + joints[:, chain[i - 1]] + return joints + + def forward_kinematics_cont6d_np(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): + # cont6d_params (batch_size, joints_num, 6) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) + offsets = offsets.numpy() + joints = np.zeros(cont6d_params.shape[:-1] + (3,)) + joints[:, 0] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + matR = cont6d_to_matrix_np(cont6d_params[:, 0]) + else: + matR = np.eye(3)[np.newaxis, :].repeat(len(cont6d_params), axis=0) + for i in range(1, len(chain)): + matR = np.matmul(matR, cont6d_to_matrix_np(cont6d_params[:, chain[i]])) + offset_vec = offsets[:, chain[i]][..., np.newaxis] + # print(matR.shape, offset_vec.shape) + joints[:, chain[i]] = np.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] + return joints + + def forward_kinematics_cont6d(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): + # cont6d_params (batch_size, joints_num, 6) + # joints (batch_size, joints_num, 3) + # root_pos (batch_size, 3) + if skel_joints is not None: + # skel_joints = torch.from_numpy(skel_joints) + offsets = self.get_offsets_joints_batch(skel_joints) + if len(self._offset.shape) == 2: + offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) + joints = torch.zeros(cont6d_params.shape[:-1] + (3,)).to(cont6d_params.device) + joints[..., 0, :] = root_pos + for chain in self._kinematic_tree: + if do_root_R: + matR = cont6d_to_matrix(cont6d_params[:, 0]) + else: + matR = torch.eye(3).expand((len(cont6d_params), -1, -1)).detach().to(cont6d_params.device) + for i in range(1, len(chain)): + matR = torch.matmul(matR, cont6d_to_matrix(cont6d_params[:, chain[i]])) + offset_vec = offsets[:, chain[i]].unsqueeze(-1) + # print(matR.shape, offset_vec.shape) + joints[:, chain[i]] = torch.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] + return joints + + + + + diff --git a/motion_diffusion_model/data_loaders/humanml/data/__init__.py b/motion_diffusion_model/data_loaders/humanml/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/motion_diffusion_model/data_loaders/humanml/data/dataset.py b/motion_diffusion_model/data_loaders/humanml/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..eeed96557d9842a33486fa5c861bf27393839145 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/data/dataset.py @@ -0,0 +1,823 @@ +import torch +from torch.utils import data +import numpy as np +import os +from os.path import join as pjoin +import random +import codecs as cs +from tqdm import tqdm +import spacy + +from torch.utils.data._utils.collate import default_collate +from data_loaders.humanml.utils.word_vectorizer import WordVectorizer +from data_loaders.humanml.utils.get_opt import get_opt + +# import spacy + +def collate_fn(batch): + batch.sort(key=lambda x: x[3], reverse=True) + return 
default_collate(batch) + + +'''For use of training text-2-motion generative model''' +class Text2MotionDataset(data.Dataset): + def __init__(self, opt, mean, std, split_file, w_vectorizer): + self.opt = opt + self.w_vectorizer = w_vectorizer + self.max_length = 20 + self.pointer = 0 + min_motion_len = 40 if self.opt.dataset_name =='t2m' else 24 + + joints_num = opt.joints_num + + data_dict = {} + id_list = [] + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + + new_name_list = [] + length_list = [] + for name in tqdm(id_list): + try: + motion = np.load(pjoin(opt.motion_dir, name + '.npy')) + if (len(motion)) < min_motion_len or (len(motion) >= 200): + continue + text_data = [] + flag = False + with cs.open(pjoin(opt.text_dir, name + '.txt')) as f: + for line in f.readlines(): + text_dict = {} + line_split = line.strip().split('#') + caption = line_split[0] + tokens = line_split[1].split(' ') + f_tag = float(line_split[2]) + to_tag = float(line_split[3]) + f_tag = 0.0 if np.isnan(f_tag) else f_tag + to_tag = 0.0 if np.isnan(to_tag) else to_tag + + text_dict['caption'] = caption + text_dict['tokens'] = tokens + if f_tag == 0.0 and to_tag == 0.0: + flag = True + text_data.append(text_dict) + else: + try: + n_motion = motion[int(f_tag*20) : int(to_tag*20)] + if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200): + continue + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + while new_name in data_dict: + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + data_dict[new_name] = {'motion': n_motion, + 'length': len(n_motion), + 'text':[text_dict]} + new_name_list.append(new_name) + length_list.append(len(n_motion)) + except: + print(line_split) + print(line_split[2], line_split[3], f_tag, to_tag, name) + # break + + if flag: + data_dict[name] = {'motion': motion, + 'length': len(motion), + 'text':text_data} + new_name_list.append(name) + length_list.append(len(motion)) + except: + # Some motion may not exist in KIT dataset + pass + + + name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1])) + + if opt.is_train: + # root_rot_velocity (B, seq_len, 1) + std[0:1] = std[0:1] / opt.feat_bias + # root_linear_velocity (B, seq_len, 2) + std[1:3] = std[1:3] / opt.feat_bias + # root_y (B, seq_len, 1) + std[3:4] = std[3:4] / opt.feat_bias + # ric_data (B, seq_len, (joint_num - 1)*3) + std[4: 4 + (joints_num - 1) * 3] = std[4: 4 + (joints_num - 1) * 3] / 1.0 + # rot_data (B, seq_len, (joint_num - 1)*6) + std[4 + (joints_num - 1) * 3: 4 + (joints_num - 1) * 9] = std[4 + (joints_num - 1) * 3: 4 + ( + joints_num - 1) * 9] / 1.0 + # local_velocity (B, seq_len, joint_num*3) + std[4 + (joints_num - 1) * 9: 4 + (joints_num - 1) * 9 + joints_num * 3] = std[ + 4 + (joints_num - 1) * 9: 4 + ( + joints_num - 1) * 9 + joints_num * 3] / 1.0 + # foot contact (B, seq_len, 4) + std[4 + (joints_num - 1) * 9 + joints_num * 3:] = std[ + 4 + (joints_num - 1) * 9 + joints_num * 3:] / opt.feat_bias + + assert 4 + (joints_num - 1) * 9 + joints_num * 3 + 4 == mean.shape[-1] + np.save(pjoin(opt.meta_dir, 'mean.npy'), mean) + np.save(pjoin(opt.meta_dir, 'std.npy'), std) + + self.mean = mean + self.std = std + self.length_arr = np.array(length_list) + self.data_dict = data_dict + self.name_list = name_list + self.reset_max_len(self.max_length) + + def reset_max_len(self, length): + assert length <= self.opt.max_motion_length + self.pointer = np.searchsorted(self.length_arr, length) + print("Pointer Pointing at 
%d"%self.pointer) + self.max_length = length + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return len(self.data_dict) - self.pointer + + def __getitem__(self, item): + idx = self.pointer + item + data = self.data_dict[self.name_list[idx]] + motion, m_length, text_list = data['motion'], data['length'], data['text'] + # Randomly select a caption + text_data = random.choice(text_list) + caption, tokens = text_data['caption'], text_data['tokens'] + + if len(tokens) < self.opt.max_text_len: + # pad with "unk" + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + tokens = tokens + ['unk/OTHER'] * (self.opt.max_text_len + 2 - sent_len) + else: + # crop + tokens = tokens[:self.opt.max_text_len] + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + pos_one_hots = [] + word_embeddings = [] + for token in tokens: + word_emb, pos_oh = self.w_vectorizer[token] + pos_one_hots.append(pos_oh[None, :]) + word_embeddings.append(word_emb[None, :]) + pos_one_hots = np.concatenate(pos_one_hots, axis=0) + word_embeddings = np.concatenate(word_embeddings, axis=0) + + len_gap = (m_length - self.max_length) // self.opt.unit_length + + if self.opt.is_train: + if m_length != self.max_length: + # print("Motion original length:%d_%d"%(m_length, len(motion))) + if self.opt.unit_length < 10: + coin2 = np.random.choice(['single', 'single', 'double']) + else: + coin2 = 'single' + if len_gap == 0 or (len_gap == 1 and coin2 == 'double'): + m_length = self.max_length + idx = random.randint(0, m_length - self.max_length) + motion = motion[idx:idx+self.max_length] + else: + if coin2 == 'single': + n_m_length = self.max_length + self.opt.unit_length * len_gap + else: + n_m_length = self.max_length + self.opt.unit_length * (len_gap - 1) + idx = random.randint(0, m_length - n_m_length) + motion = motion[idx:idx + self.max_length] + m_length = n_m_length + # print(len_gap, idx, coin2) + else: + if self.opt.unit_length < 10: + coin2 = np.random.choice(['single', 'single', 'double']) + else: + coin2 = 'single' + + if coin2 == 'double': + m_length = (m_length // self.opt.unit_length - 1) * self.opt.unit_length + elif coin2 == 'single': + m_length = (m_length // self.opt.unit_length) * self.opt.unit_length + idx = random.randint(0, len(motion) - m_length) + motion = motion[idx:idx+m_length] + + "Z Normalization" + motion = (motion - self.mean) / self.std + + return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length + + +'''For use of training text motion matching model, and evaluations''' +class Text2MotionDatasetV2(data.Dataset): + def __init__(self, opt, mean, std, split_file, w_vectorizer): + self.opt = opt + self.w_vectorizer = w_vectorizer + self.max_length = 20 + if self.opt.fixed_len > 0: + self.max_length = self.opt.fixed_len + self.pointer = 0 + self.max_motion_length = opt.max_motion_length + min_motion_len = 40 if self.opt.dataset_name =='t2m' else 24 + + data_dict = {} + id_list = [] + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + # id_list = id_list[:200] + + new_name_list = [] + length_list = [] + + _split = os.path.basename(split_file).replace('.txt', '') + _name ='' + # cache_path = os.path.join(opt.meta_dir, self.opt.dataset_name + '_' + _split + _name + '.npy') + cache_path = os.path.join(opt.cache_dir, 'dataset', self.opt.dataset_name + '_' + _split + _name + '.npy') + if opt.use_cache and os.path.exists(cache_path): + print(f'Loading motions from cache 
file [{cache_path}]...') + _cache = np.load(cache_path, allow_pickle=True)[None][0] + name_list, length_list, data_dict = _cache['name_list'], _cache['length_list'], _cache['data_dict'] + # name_list = name_list[:15]; length_list = length_list[:15] + # data_dict = {key: data_dict[key] for key in name_list} + else: + for name in tqdm(id_list): + try: + motion = np.load(pjoin(opt.motion_dir, name + '.npy')) + if (len(motion)) < min_motion_len or (len(motion) >= 200): + continue + text_data = [] + flag = False + with cs.open(pjoin(opt.text_dir, name + '.txt')) as f: + for line in f.readlines(): + text_dict = {} + line_split = line.strip().split('#') + caption = line_split[0] + tokens = line_split[1].split(' ') + f_tag = float(line_split[2]) + to_tag = float(line_split[3]) + f_tag = 0.0 if np.isnan(f_tag) else f_tag + to_tag = 0.0 if np.isnan(to_tag) else to_tag + + text_dict['caption'] = caption + text_dict['tokens'] = tokens + if f_tag == 0.0 and to_tag == 0.0: + flag = True + text_data.append(text_dict) + else: + try: + n_motion = motion[int(f_tag*20) : int(to_tag*20)] + if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200): + continue + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + while new_name in data_dict: + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + data_dict[new_name] = {'motion': n_motion, + 'length': len(n_motion), + 'text':[text_dict]} + new_name_list.append(new_name) + length_list.append(len(n_motion)) + except: + print(line_split) + print(line_split[2], line_split[3], f_tag, to_tag, name) + # break + + if flag: + data_dict[name] = {'motion': motion, + 'length': len(motion), + 'text': text_data} + new_name_list.append(name) + length_list.append(len(motion)) + except: + pass + + name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1])) + print(f'Saving motions to cache file [{cache_path}]...') + np.save(cache_path, { + 'name_list': name_list, + 'length_list': length_list, + 'data_dict': data_dict}) + + self.mean = mean + self.std = std + self.length_arr = np.array(length_list) + self.data_dict = data_dict + self.name_list = name_list + self.reset_max_len(self.max_length) + + def reset_max_len(self, length): + assert length <= self.max_motion_length + self.pointer = np.searchsorted(self.length_arr, length) + print("Pointer Pointing at %d"%self.pointer) + self.max_length = length + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return len(self.data_dict) - self.pointer + + def __getitem__(self, item): + idx = self.pointer + item + key = self.name_list[idx] + data = self.data_dict[key] + motion, m_length, text_list = data['motion'], data['length'], data['text'] + # Randomly select a caption + text_data = random.choice(text_list) + caption, tokens = text_data['caption'], text_data['tokens'] + + if len(tokens) < self.opt.max_text_len: + # pad with "unk" + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + tokens = tokens + ['unk/OTHER'] * (self.opt.max_text_len + 2 - sent_len) + else: + # crop + tokens = tokens[:self.opt.max_text_len] + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + pos_one_hots = [] + word_embeddings = [] + for token in tokens: + word_emb, pos_oh = self.w_vectorizer[token] + pos_one_hots.append(pos_oh[None, :]) + word_embeddings.append(word_emb[None, :]) + pos_one_hots = np.concatenate(pos_one_hots, axis=0) + word_embeddings = np.concatenate(word_embeddings, axis=0) + + # Crop the 
motions in to times of 4, and introduce small variations + if self.opt.unit_length < 10: + coin2 = np.random.choice(['single', 'single', 'double']) + else: + coin2 = 'single' + + if coin2 == 'double': + m_length = (m_length // self.opt.unit_length - 1) * self.opt.unit_length + elif coin2 == 'single': + m_length = (m_length // self.opt.unit_length) * self.opt.unit_length + + original_length = None + if self.opt.fixed_len > 0: + # Crop fixed_len + original_length = m_length + m_length = self.opt.fixed_len + + idx = random.randint(0, len(motion) - m_length) + if self.opt.disable_offset_aug: + idx = random.randint(0, self.opt.unit_length) + motion = motion[idx:idx+m_length] + + "Z Normalization" + motion = (motion - self.mean) / self.std + + if m_length < self.max_motion_length: + motion = np.concatenate([motion, + np.zeros((self.max_motion_length - m_length, motion.shape[1])) + ], axis=0) + # print(word_embeddings.shape, motion.shape) + # print(tokens) + + length = (original_length, m_length) if self.opt.fixed_len > 0 else m_length + + return word_embeddings, pos_one_hots, caption, sent_len, motion, length, '_'.join(tokens) + + +'''For use of training baseline''' +class Text2MotionDatasetBaseline(data.Dataset): + def __init__(self, opt, mean, std, split_file, w_vectorizer): + self.opt = opt + self.w_vectorizer = w_vectorizer + self.max_length = 20 + self.pointer = 0 + self.max_motion_length = opt.max_motion_length + min_motion_len = 40 if self.opt.dataset_name =='t2m' else 24 + + data_dict = {} + id_list = [] + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + # id_list = id_list[:200] + + new_name_list = [] + length_list = [] + for name in tqdm(id_list): + try: + motion = np.load(pjoin(opt.motion_dir, name + '.npy')) + if (len(motion)) < min_motion_len or (len(motion) >= 200): + continue + text_data = [] + flag = False + with cs.open(pjoin(opt.text_dir, name + '.txt')) as f: + for line in f.readlines(): + text_dict = {} + line_split = line.strip().split('#') + caption = line_split[0] + tokens = line_split[1].split(' ') + f_tag = float(line_split[2]) + to_tag = float(line_split[3]) + f_tag = 0.0 if np.isnan(f_tag) else f_tag + to_tag = 0.0 if np.isnan(to_tag) else to_tag + + text_dict['caption'] = caption + text_dict['tokens'] = tokens + if f_tag == 0.0 and to_tag == 0.0: + flag = True + text_data.append(text_dict) + else: + try: + n_motion = motion[int(f_tag*20) : int(to_tag*20)] + if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200): + continue + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + while new_name in data_dict: + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + data_dict[new_name] = {'motion': n_motion, + 'length': len(n_motion), + 'text':[text_dict]} + new_name_list.append(new_name) + length_list.append(len(n_motion)) + except: + print(line_split) + print(line_split[2], line_split[3], f_tag, to_tag, name) + # break + + if flag: + data_dict[name] = {'motion': motion, + 'length': len(motion), + 'text': text_data} + new_name_list.append(name) + length_list.append(len(motion)) + except: + pass + + name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1])) + + self.mean = mean + self.std = std + self.length_arr = np.array(length_list) + self.data_dict = data_dict + self.name_list = name_list + self.reset_max_len(self.max_length) + + def reset_max_len(self, length): + assert length <= self.max_motion_length + self.pointer = np.searchsorted(self.length_arr, 
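+                                        # length_arr is sorted, so this skips motions shorter than `length`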
length) + print("Pointer Pointing at %d"%self.pointer) + self.max_length = length + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return len(self.data_dict) - self.pointer + + def __getitem__(self, item): + idx = self.pointer + item + data = self.data_dict[self.name_list[idx]] + motion, m_length, text_list = data['motion'], data['length'], data['text'] + # Randomly select a caption + text_data = random.choice(text_list) + caption, tokens = text_data['caption'], text_data['tokens'] + + if len(tokens) < self.opt.max_text_len: + # pad with "unk" + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + tokens = tokens + ['unk/OTHER'] * (self.opt.max_text_len + 2 - sent_len) + else: + # crop + tokens = tokens[:self.opt.max_text_len] + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + pos_one_hots = [] + word_embeddings = [] + for token in tokens: + word_emb, pos_oh = self.w_vectorizer[token] + pos_one_hots.append(pos_oh[None, :]) + word_embeddings.append(word_emb[None, :]) + pos_one_hots = np.concatenate(pos_one_hots, axis=0) + word_embeddings = np.concatenate(word_embeddings, axis=0) + + len_gap = (m_length - self.max_length) // self.opt.unit_length + + if m_length != self.max_length: + # print("Motion original length:%d_%d"%(m_length, len(motion))) + if self.opt.unit_length < 10: + coin2 = np.random.choice(['single', 'single', 'double']) + else: + coin2 = 'single' + if len_gap == 0 or (len_gap == 1 and coin2 == 'double'): + m_length = self.max_length + s_idx = random.randint(0, m_length - self.max_length) + else: + if coin2 == 'single': + n_m_length = self.max_length + self.opt.unit_length * len_gap + else: + n_m_length = self.max_length + self.opt.unit_length * (len_gap - 1) + s_idx = random.randint(0, m_length - n_m_length) + m_length = n_m_length + else: + s_idx = 0 + + src_motion = motion[s_idx: s_idx + m_length] + tgt_motion = motion[s_idx: s_idx + self.max_length] + + "Z Normalization" + src_motion = (src_motion - self.mean) / self.std + tgt_motion = (tgt_motion - self.mean) / self.std + + if m_length < self.max_motion_length: + src_motion = np.concatenate([src_motion, + np.zeros((self.max_motion_length - m_length, motion.shape[1])) + ], axis=0) + # print(m_length, src_motion.shape, tgt_motion.shape) + # print(word_embeddings.shape, motion.shape) + # print(tokens) + return word_embeddings, caption, sent_len, src_motion, tgt_motion, m_length + + +class MotionDatasetV2(data.Dataset): + def __init__(self, opt, mean, std, split_file): + self.opt = opt + joints_num = opt.joints_num + + self.data = [] + self.lengths = [] + id_list = [] + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + + for name in tqdm(id_list): + try: + motion = np.load(pjoin(opt.motion_dir, name + '.npy')) + if motion.shape[0] < opt.window_size: + continue + self.lengths.append(motion.shape[0] - opt.window_size) + self.data.append(motion) + except: + # Some motion may not exist in KIT dataset + pass + + self.cumsum = np.cumsum([0] + self.lengths) + + if opt.is_train: + # root_rot_velocity (B, seq_len, 1) + std[0:1] = std[0:1] / opt.feat_bias + # root_linear_velocity (B, seq_len, 2) + std[1:3] = std[1:3] / opt.feat_bias + # root_y (B, seq_len, 1) + std[3:4] = std[3:4] / opt.feat_bias + # ric_data (B, seq_len, (joint_num - 1)*3) + std[4: 4 + (joints_num - 1) * 3] = std[4: 4 + (joints_num - 1) * 3] / 1.0 + # rot_data (B, seq_len, (joint_num - 1)*6) + std[4 + (joints_num - 1) * 3: 4 + 
(joints_num - 1) * 9] = std[4 + (joints_num - 1) * 3: 4 + ( + joints_num - 1) * 9] / 1.0 + # local_velocity (B, seq_len, joint_num*3) + std[4 + (joints_num - 1) * 9: 4 + (joints_num - 1) * 9 + joints_num * 3] = std[ + 4 + (joints_num - 1) * 9: 4 + ( + joints_num - 1) * 9 + joints_num * 3] / 1.0 + # foot contact (B, seq_len, 4) + std[4 + (joints_num - 1) * 9 + joints_num * 3:] = std[ + 4 + (joints_num - 1) * 9 + joints_num * 3:] / opt.feat_bias + + assert 4 + (joints_num - 1) * 9 + joints_num * 3 + 4 == mean.shape[-1] + np.save(pjoin(opt.meta_dir, 'mean.npy'), mean) + np.save(pjoin(opt.meta_dir, 'std.npy'), std) + + self.mean = mean + self.std = std + print("Total number of motions {}, snippets {}".format(len(self.data), self.cumsum[-1])) + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return self.cumsum[-1] + + def __getitem__(self, item): + if item != 0: + motion_id = np.searchsorted(self.cumsum, item) - 1 + idx = item - self.cumsum[motion_id] - 1 + else: + motion_id = 0 + idx = 0 + motion = self.data[motion_id][idx:idx+self.opt.window_size] + "Z Normalization" + motion = (motion - self.mean) / self.std + + return motion + + +class RawTextDataset(data.Dataset): + def __init__(self, opt, mean, std, text_file, w_vectorizer): + self.mean = mean + self.std = std + self.opt = opt + self.data_dict = [] + self.nlp = spacy.load('en_core_web_sm') + + with cs.open(text_file) as f: + for line in f.readlines(): + word_list, pos_list = self.process_text(line.strip()) + tokens = ['%s/%s'%(word_list[i], pos_list[i]) for i in range(len(word_list))] + self.data_dict.append({'caption':line.strip(), "tokens":tokens}) + + self.w_vectorizer = w_vectorizer + print("Total number of descriptions {}".format(len(self.data_dict))) + + + def process_text(self, sentence): + sentence = sentence.replace('-', '') + doc = self.nlp(sentence) + word_list = [] + pos_list = [] + for token in doc: + word = token.text + if not word.isalpha(): + continue + if (token.pos_ == 'NOUN' or token.pos_ == 'VERB') and (word != 'left'): + word_list.append(token.lemma_) + else: + word_list.append(word) + pos_list.append(token.pos_) + return word_list, pos_list + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return len(self.data_dict) + + def __getitem__(self, item): + data = self.data_dict[item] + caption, tokens = data['caption'], data['tokens'] + + if len(tokens) < self.opt.max_text_len: + # pad with "unk" + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + tokens = tokens + ['unk/OTHER'] * (self.opt.max_text_len + 2 - sent_len) + else: + # crop + tokens = tokens[:self.opt.max_text_len] + tokens = ['sos/OTHER'] + tokens + ['eos/OTHER'] + sent_len = len(tokens) + pos_one_hots = [] + word_embeddings = [] + for token in tokens: + word_emb, pos_oh = self.w_vectorizer[token] + pos_one_hots.append(pos_oh[None, :]) + word_embeddings.append(word_emb[None, :]) + pos_one_hots = np.concatenate(pos_one_hots, axis=0) + word_embeddings = np.concatenate(word_embeddings, axis=0) + + return word_embeddings, pos_one_hots, caption, sent_len + +class TextOnlyDataset(data.Dataset): + def __init__(self, opt, mean, std, split_file): + self.mean = mean + self.std = std + self.opt = opt + self.data_dict = [] + self.max_length = 20 + self.pointer = 0 + self.fixed_length = 120 + + + data_dict = {} + id_list = [] + with cs.open(split_file, 'r') as f: + for line in f.readlines(): + id_list.append(line.strip()) + # id_list = id_list[:200] + + 
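+        # Each HumanML3D text annotation file stores one caption per line in the form
+        # "caption#token/POS token/POS ...#from_tag#to_tag". Captions whose time tags are
+        # both 0.0 describe the whole clip; otherwise the loop below registers the entry
+        # under a letter-prefixed copy of the motion name for that tagged sub-segment.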
new_name_list = [] + length_list = [] + for name in tqdm(id_list): + try: + text_data = [] + flag = False + with cs.open(pjoin(opt.text_dir, name + '.txt')) as f: + for line in f.readlines(): + text_dict = {} + line_split = line.strip().split('#') + caption = line_split[0] + tokens = line_split[1].split(' ') + f_tag = float(line_split[2]) + to_tag = float(line_split[3]) + f_tag = 0.0 if np.isnan(f_tag) else f_tag + to_tag = 0.0 if np.isnan(to_tag) else to_tag + + text_dict['caption'] = caption + text_dict['tokens'] = tokens + if f_tag == 0.0 and to_tag == 0.0: + flag = True + text_data.append(text_dict) + else: + try: + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + while new_name in data_dict: + new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name + data_dict[new_name] = {'text':[text_dict]} + new_name_list.append(new_name) + except: + print(line_split) + print(line_split[2], line_split[3], f_tag, to_tag, name) + # break + + if flag: + data_dict[name] = {'text': text_data} + new_name_list.append(name) + except: + pass + + self.length_arr = np.array(length_list) + self.data_dict = data_dict + self.name_list = new_name_list + + def inv_transform(self, data): + return data * self.std + self.mean + + def __len__(self): + return len(self.data_dict) + + def __getitem__(self, item): + idx = self.pointer + item + data = self.data_dict[self.name_list[idx]] + text_list = data['text'] + + # Randomly select a caption + text_data = random.choice(text_list) + caption, tokens = text_data['caption'], text_data['tokens'] + return None, None, caption, None, np.array([0]), self.fixed_length, None + # fixed_length can be set from outside before sampling + +# A wrapper class for t2m original dataset for MDM purposes +class HumanML3D(data.Dataset): + def __init__(self, mode, datapath='./dataset/humanml_opt.txt', split="train", **kwargs): + self.mode = mode + + self.dataset_name = 't2m' + self.dataname = 't2m' + + # Configurations of T2M dataset and KIT dataset is almost the same + abs_base_path = kwargs.get('abs_path', '.') + dataset_opt_path = pjoin(abs_base_path, datapath) + device = kwargs.get('device', None) + opt = get_opt(dataset_opt_path, device) + # opt.meta_dir = pjoin(abs_base_path, opt.meta_dir) + opt.cache_dir = kwargs.get('cache_path', '.') + opt.motion_dir = pjoin(abs_base_path, opt.motion_dir) + opt.text_dir = pjoin(abs_base_path, opt.text_dir) + opt.model_dir = pjoin(abs_base_path, opt.model_dir) + opt.checkpoints_dir = pjoin(abs_base_path, opt.checkpoints_dir) + opt.data_root = pjoin(abs_base_path, opt.data_root) + opt.save_root = pjoin(abs_base_path, opt.save_root) + opt.meta_dir = pjoin(abs_base_path, './dataset') + opt.use_cache = kwargs.get('use_cache', True) + opt.fixed_len = kwargs.get('fixed_len', 0) + if opt.fixed_len > 0: + opt.max_motion_length = opt.fixed_len + is_autoregressive = kwargs.get('autoregressive', False) + opt.disable_offset_aug = is_autoregressive and (opt.fixed_len > 0) and (mode == 'eval') # for autoregressive evaluation, use the start of the motion and not something from the middle + self.opt = opt + print('Loading dataset %s ...' 
% opt.dataset_name) + + if mode == 'gt': + # used by T2M models (including evaluators) + self.mean = np.load(pjoin(opt.meta_dir, f'{opt.dataset_name}_mean.npy')) + self.std = np.load(pjoin(opt.meta_dir, f'{opt.dataset_name}_std.npy')) + elif mode in ['train', 'eval', 'text_only']: + # used by our models + self.mean = np.load(pjoin(opt.data_root, 'Mean.npy')) + self.std = np.load(pjoin(opt.data_root, 'Std.npy')) + + if mode == 'eval': + # used by T2M models (including evaluators) + # this is to translate their norms to ours + self.mean_for_eval = np.load(pjoin(opt.meta_dir, f'{opt.dataset_name}_mean.npy')) + self.std_for_eval = np.load(pjoin(opt.meta_dir, f'{opt.dataset_name}_std.npy')) + + self.split_file = pjoin(opt.data_root, f'{split}.txt') + if mode == 'text_only': + self.t2m_dataset = TextOnlyDataset(self.opt, self.mean, self.std, self.split_file) + else: + self.w_vectorizer = WordVectorizer(pjoin(opt.cache_dir, 'glove'), 'our_vab') + self.t2m_dataset = Text2MotionDatasetV2(self.opt, self.mean, self.std, self.split_file, self.w_vectorizer) + self.num_actions = 1 # dummy placeholder + + self.mean_gpu = torch.tensor(self.mean).to(device)[None, :, None, None] + self.std_gpu = torch.tensor(self.std).to(device)[None, :, None, None] + + assert len(self.t2m_dataset) > 1, 'You loaded an empty dataset, ' \ + 'it is probably because your data dir has only texts and no motions.\n' \ + 'To train and evaluate MDM you should get the FULL data as described ' \ + 'in the README file.' + + def __getitem__(self, item): + return self.t2m_dataset.__getitem__(item) + + def __len__(self): + return self.t2m_dataset.__len__() + +# A wrapper class for t2m original dataset for MDM purposes +class KIT(HumanML3D): + def __init__(self, mode, datapath='./dataset/kit_opt.txt', split="train", **kwargs): + super(KIT, self).__init__(mode, datapath, split, **kwargs) \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/motion_loaders/__init__.py b/motion_diffusion_model/data_loaders/humanml/motion_loaders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/motion_diffusion_model/data_loaders/humanml/motion_loaders/comp_v6_model_dataset.py b/motion_diffusion_model/data_loaders/humanml/motion_loaders/comp_v6_model_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..53d5e57e8e2cd536695f5d806a1a5cd4143bf7a8 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/motion_loaders/comp_v6_model_dataset.py @@ -0,0 +1,285 @@ +import torch +from data_loaders.humanml.networks.modules import * +from data_loaders.humanml.networks.trainers import CompTrainerV6 +from torch.utils.data import Dataset, DataLoader +from os.path import join as pjoin +from tqdm import tqdm +from utils import dist_util +from utils.sampler_util import AutoRegressiveSampler + + +def build_models(opt): + if opt.text_enc_mod == 'bigru': + text_encoder = TextEncoderBiGRU(word_size=opt.dim_word, + pos_size=opt.dim_pos_ohot, + hidden_size=opt.dim_text_hidden, + device=opt.device) + text_size = opt.dim_text_hidden * 2 + else: + raise Exception("Text Encoder Mode not Recognized!!!") + + seq_prior = TextDecoder(text_size=text_size, + input_size=opt.dim_att_vec + opt.dim_movement_latent, + output_size=opt.dim_z, + hidden_size=opt.dim_pri_hidden, + n_layers=opt.n_layers_pri) + + + seq_decoder = TextVAEDecoder(text_size=text_size, + input_size=opt.dim_att_vec + opt.dim_z + opt.dim_movement_latent, + 
output_size=opt.dim_movement_latent, + hidden_size=opt.dim_dec_hidden, + n_layers=opt.n_layers_dec) + + att_layer = AttLayer(query_dim=opt.dim_pos_hidden, + key_dim=text_size, + value_dim=opt.dim_att_vec) + + movement_enc = MovementConvEncoder(opt.dim_pose - 4, opt.dim_movement_enc_hidden, opt.dim_movement_latent) + movement_dec = MovementConvDecoder(opt.dim_movement_latent, opt.dim_movement_dec_hidden, opt.dim_pose) + + len_estimator = MotionLenEstimatorBiGRU(opt.dim_word, opt.dim_pos_ohot, 512, opt.num_classes) + + # latent_dis = LatentDis(input_size=opt.dim_z * 2) + checkpoints = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'length_est_bigru', 'model', 'latest.tar'), map_location=opt.device) + len_estimator.load_state_dict(checkpoints['estimator']) + len_estimator.to(opt.device) + len_estimator.eval() + + # return text_encoder, text_decoder, att_layer, vae_pri, vae_dec, vae_pos, motion_dis, movement_dis, latent_dis + return text_encoder, seq_prior, seq_decoder, att_layer, movement_enc, movement_dec, len_estimator + +class CompV6GeneratedDataset(Dataset): + + def __init__(self, opt, dataset, w_vectorizer, mm_num_samples, mm_num_repeats): + assert mm_num_samples < len(dataset) + print(opt.model_dir) + + dataloader = DataLoader(dataset, batch_size=1, num_workers=1, shuffle=True) + text_enc, seq_pri, seq_dec, att_layer, mov_enc, mov_dec, len_estimator = build_models(opt) + trainer = CompTrainerV6(opt, text_enc, seq_pri, seq_dec, att_layer, mov_dec, mov_enc=mov_enc) + epoch, it, sub_ep, schedule_len = trainer.load(pjoin(opt.model_dir, opt.which_epoch + '.tar')) + generated_motion = [] + mm_generated_motions = [] + mm_idxs = np.random.choice(len(dataset), mm_num_samples, replace=False) + mm_idxs = np.sort(mm_idxs) + min_mov_length = 10 if opt.dataset_name == 't2m' else 6 + # print(mm_idxs) + + print('Loading model: Epoch %03d Schedule_len %03d' % (epoch, schedule_len)) + trainer.eval_mode() + trainer.to(opt.device) + with torch.no_grad(): + for i, data in tqdm(enumerate(dataloader)): + word_emb, pos_ohot, caption, cap_lens, motions, m_lens, tokens = data + tokens = tokens[0].split('_') + word_emb = word_emb.detach().to(opt.device).float() + pos_ohot = pos_ohot.detach().to(opt.device).float() + + pred_dis = len_estimator(word_emb, pos_ohot, cap_lens) + pred_dis = nn.Softmax(-1)(pred_dis).squeeze() + + mm_num_now = len(mm_generated_motions) + is_mm = True if ((mm_num_now < mm_num_samples) and (i == mm_idxs[mm_num_now])) else False + + repeat_times = mm_num_repeats if is_mm else 1 + mm_motions = [] + for t in range(repeat_times): + mov_length = torch.multinomial(pred_dis, 1, replacement=True) + if mov_length < min_mov_length: + mov_length = torch.multinomial(pred_dis, 1, replacement=True) + if mov_length < min_mov_length: + mov_length = torch.multinomial(pred_dis, 1, replacement=True) + + m_lens = mov_length * opt.unit_length + pred_motions, _, _ = trainer.generate(word_emb, pos_ohot, cap_lens, m_lens, + m_lens[0]//opt.unit_length, opt.dim_pose) + if t == 0: + # print(m_lens) + # print(text_data) + sub_dict = {'motion': pred_motions[0].cpu().numpy(), + 'length': m_lens[0].item(), + 'cap_len': cap_lens[0].item(), + 'caption': caption[0], + 'tokens': tokens} + generated_motion.append(sub_dict) + + if is_mm: + mm_motions.append({ + 'motion': pred_motions[0].cpu().numpy(), + 'length': m_lens[0].item() + }) + if is_mm: + mm_generated_motions.append({'caption': caption[0], + 'tokens': tokens, + 'cap_len': cap_lens[0].item(), + 'mm_motions': mm_motions}) + + self.generated_motion = 
generated_motion + self.mm_generated_motion = mm_generated_motions + self.opt = opt + self.w_vectorizer = w_vectorizer + + + def __len__(self): + return len(self.generated_motion) + + + def __getitem__(self, item): + data = self.generated_motion[item] + motion, m_length, caption, tokens = data['motion'], data['length'], data['caption'], data['tokens'] + sent_len = data['cap_len'] + + pos_one_hots = [] + word_embeddings = [] + for token in tokens: + word_emb, pos_oh = self.w_vectorizer[token] + pos_one_hots.append(pos_oh[None, :]) + word_embeddings.append(word_emb[None, :]) + pos_one_hots = np.concatenate(pos_one_hots, axis=0) + word_embeddings = np.concatenate(word_embeddings, axis=0) + + if m_length < self.opt.max_motion_length: + motion = np.concatenate([motion, + np.zeros((self.opt.max_motion_length - m_length, motion.shape[1])) + ], axis=0) + return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens) + +class CompMDMGeneratedDataset(Dataset): + + def __init__(self, args, model, diffusion, dataloader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale=1.): + self.args = args + self.dataloader = dataloader + self.dataset = dataloader.dataset + self.model = model + assert mm_num_samples < len(dataloader.dataset) + use_ddim = False # FIXME - hardcoded + clip_denoised = False # FIXME - hardcoded + self.max_motion_length = max_motion_length + sample_fn = ( + diffusion.p_sample_loop if not use_ddim else diffusion.ddim_sample_loop + ) + if self.args.autoregressive: + sample_cls = AutoRegressiveSampler(args, sample_fn) + sample_fn = sample_cls.sample + + + real_num_batches = len(dataloader) + if num_samples_limit is not None: + real_num_batches = min(num_samples_limit // dataloader.batch_size + 1, real_num_batches) + print('real_num_batches', real_num_batches) + + generated_motion = [] + mm_generated_motions = [] + if mm_num_samples > 0: + mm_idxs = np.random.choice(real_num_batches, mm_num_samples // dataloader.batch_size +1, replace=False) + mm_idxs = np.sort(mm_idxs) + else: + mm_idxs = [] + print('mm_idxs', mm_idxs) + + model.eval() + + + with torch.no_grad(): + for i, (motion, model_kwargs) in tqdm(enumerate(dataloader)): + + if num_samples_limit is not None and len(generated_motion) >= num_samples_limit: + break + + model_kwargs['y'] = {key: val.to(dist_util.dev()) if torch.is_tensor(val) else val for key, val in model_kwargs['y'].items()} + motion = motion.to(dist_util.dev()) + + tokens = [t.split('_') for t in model_kwargs['y']['tokens']] + + # add CFG scale to batch + if scale != 1.: + model_kwargs['y']['scale'] = torch.ones(motion.shape[0], + device=dist_util.dev()) * scale + + mm_num_now = len(mm_generated_motions) // dataloader.batch_size + is_mm = i in mm_idxs + repeat_times = mm_num_repeats if is_mm else 1 + mm_motions = [] + for t in range(repeat_times): + + sample = sample_fn( + model, + motion.shape, + clip_denoised=clip_denoised, + model_kwargs=model_kwargs, + skip_timesteps=0, # 0 is the default value - i.e. 
don't skip any step + init_image=None, + progress=False, + dump_steps=None, + noise=None, + const_noise=False, + # when experimenting guidance_scale we want to nutrileze the effect of noise on generation + ) + + if 'prefix' in model_kwargs['y'].keys(): + model_kwargs['y']['lengths'] = model_kwargs['y']['orig_lengths'] + + if t == 0: + sub_dicts = [{ + 'motion': sample[bs_i].squeeze().permute(1, 0).cpu().numpy(), + 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(), + 'caption': model_kwargs['y']['text'][bs_i], + 'tokens': tokens[bs_i], + # Fixed cap_len calculation, changed from len(tokens[bs_i]) + # Lead to improved R-precision and Multimodal Dist. + # issue: https://github.com/GuyTevet/motion-diffusion-model/issues/182 + 'cap_len': tokens[bs_i].index('eos/OTHER') + 1, + } for bs_i in range(dataloader.batch_size)] + generated_motion += sub_dicts + + if is_mm: + for bs_i in range(dataloader.batch_size): + mm_motion = sample[bs_i].squeeze().permute(1, 0).cpu().numpy() + if self.dataset.mode == 'eval': + mm_motion = self.dataset.t2m_dataset.inv_transform(mm_motion) + mm_motion = (mm_motion - self.dataset.mean_for_eval) / self.dataset.std_for_eval # according to T2M norms + + mm_motions.append({'motion': mm_motion, + 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(), + }) + if is_mm: + mm_generated_motions += [{ + 'caption': model_kwargs['y']['text'][bs_i], + 'tokens': tokens[bs_i], + 'cap_len': len(tokens[bs_i]), + 'mm_motions': mm_motions[bs_i::dataloader.batch_size], # collect all 10 repeats from the (32*10) generated motions + } for bs_i in range(dataloader.batch_size)] + + + self.generated_motion = generated_motion + self.mm_generated_motion = mm_generated_motions + self.w_vectorizer = dataloader.dataset.w_vectorizer + + + def __len__(self): + return len(self.generated_motion) + + + def __getitem__(self, item): + data = self.generated_motion[item] + motion, m_length, caption, tokens = data['motion'], data['length'], data['caption'], data['tokens'] + sent_len = data['cap_len'] + + if self.dataset.mode == 'eval': + normed_motion = motion + denormed_motion = self.dataset.t2m_dataset.inv_transform(normed_motion) + renormed_motion = (denormed_motion - self.dataset.mean_for_eval) / self.dataset.std_for_eval # according to T2M norms + motion = renormed_motion + # This step is needed because T2M evaluators expect their norm convention + + pos_one_hots = [] + word_embeddings = [] + for token in tokens: + word_emb, pos_oh = self.w_vectorizer[token] + pos_one_hots.append(pos_oh[None, :]) + word_embeddings.append(word_emb[None, :]) + pos_one_hots = np.concatenate(pos_one_hots, axis=0) + word_embeddings = np.concatenate(word_embeddings, axis=0) + + return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens) \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/motion_loaders/dataset_motion_loader.py b/motion_diffusion_model/data_loaders/humanml/motion_loaders/dataset_motion_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..37fff1d8d7a61a26cdb4df1572bfb6fea22c34bf --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/motion_loaders/dataset_motion_loader.py @@ -0,0 +1,27 @@ +from t2m.data.dataset import Text2MotionDatasetV2, collate_fn +from t2m.utils.word_vectorizer import WordVectorizer +import numpy as np +from os.path import join as pjoin +from torch.utils.data import DataLoader +from t2m.utils.get_opt import get_opt + +def get_dataset_motion_loader(opt_path, batch_size, device): + 
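+    # Builds the ground-truth test loader used by the T2M evaluators: it reads the
+    # dataset options from opt_path, loads the evaluator mean/std from opt.meta_dir,
+    # and wraps Text2MotionDatasetV2 over the test split with the GloVe word vectorizer.
+    # Typical call (sketch only; the opt file path below is hypothetical):
+    #   gt_loader, gt_dataset = get_dataset_motion_loader('./checkpoints/t2m/opt.txt', 32, device)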
opt = get_opt(opt_path, device) + + # Configurations of T2M dataset and KIT dataset is almost the same + if opt.dataset_name == 't2m' or opt.dataset_name == 'kit': + print('Loading dataset %s ...' % opt.dataset_name) + + mean = np.load(pjoin(opt.meta_dir, 'mean.npy')) + std = np.load(pjoin(opt.meta_dir, 'std.npy')) + + w_vectorizer = WordVectorizer('./glove', 'our_vab') + split_file = pjoin(opt.data_root, 'test.txt') + dataset = Text2MotionDatasetV2(opt, mean, std, split_file, w_vectorizer) + dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True, + collate_fn=collate_fn, shuffle=True) + else: + raise KeyError('Dataset not Recognized !!') + + print('Ground Truth Dataset Loading Completed!!!') + return dataloader, dataset \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/motion_loaders/model_motion_loaders.py b/motion_diffusion_model/data_loaders/humanml/motion_loaders/model_motion_loaders.py new file mode 100644 index 0000000000000000000000000000000000000000..3a7bacbeee26c0ac9a8fc22bf8d0493d0c2b7b49 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/motion_loaders/model_motion_loaders.py @@ -0,0 +1,91 @@ +from torch.utils.data import DataLoader, Dataset +from data_loaders.humanml.utils.get_opt import get_opt +from data_loaders.humanml.motion_loaders.comp_v6_model_dataset import CompMDMGeneratedDataset +from data_loaders.humanml.utils.word_vectorizer import WordVectorizer +import numpy as np +from torch.utils.data._utils.collate import default_collate + + +def collate_fn(batch): + batch.sort(key=lambda x: x[3], reverse=True) + return default_collate(batch) + + +class MMGeneratedDataset(Dataset): + def __init__(self, opt, motion_dataset, w_vectorizer): + self.opt = opt + self.dataset = motion_dataset.mm_generated_motion + self.w_vectorizer = w_vectorizer + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, item): + data = self.dataset[item] + mm_motions = data['mm_motions'] + m_lens = [] + motions = [] + for mm_motion in mm_motions: + m_lens.append(mm_motion['length']) + motion = mm_motion['motion'] + # We don't need the following logic because our sample func generates the full tensor anyway: + # if len(motion) < self.opt.max_motion_length: + # motion = np.concatenate([motion, + # np.zeros((self.opt.max_motion_length - len(motion), motion.shape[1])) + # ], axis=0) + motion = motion[None, :] + motions.append(motion) + m_lens = np.array(m_lens, dtype=np.int) + motions = np.concatenate(motions, axis=0) + sort_indx = np.argsort(m_lens)[::-1].copy() + # print(m_lens) + # print(sort_indx) + # print(m_lens[sort_indx]) + m_lens = m_lens[sort_indx] + motions = motions[sort_indx] + return motions, m_lens + + + +def get_motion_loader(opt_path, batch_size, ground_truth_dataset, mm_num_samples, mm_num_repeats, device): + opt = get_opt(opt_path, device) + + # Currently the configurations of two datasets are almost the same + if opt.dataset_name == 't2m' or opt.dataset_name == 'kit': + w_vectorizer = WordVectorizer('./glove', 'our_vab') + else: + raise KeyError('Dataset not recognized!!') + print('Generating %s ...' 
% opt.name) + + if 'v6' in opt.name: + dataset = CompV6GeneratedDataset(opt, ground_truth_dataset, w_vectorizer, mm_num_samples, mm_num_repeats) + else: + raise KeyError('Dataset not recognized!!') + + mm_dataset = MMGeneratedDataset(opt, dataset, w_vectorizer) + + motion_loader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn, drop_last=True, num_workers=4) + mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=1) + + print('Generated Dataset Loading Completed!!!') + + return motion_loader, mm_motion_loader + +# our loader +def get_mdm_loader(args, model, diffusion, batch_size, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale): + opt = { + 'name': 'test', # FIXME + } + print('Generating %s ...' % opt['name']) + # dataset = CompMDMGeneratedDataset(opt, ground_truth_dataset, ground_truth_dataset.w_vectorizer, mm_num_samples, mm_num_repeats) + dataset = CompMDMGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale) + + mm_dataset = MMGeneratedDataset(opt, dataset, ground_truth_loader.dataset.w_vectorizer) + + # NOTE: bs must not be changed! this will cause a bug in R precision calc! + motion_loader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn, drop_last=True, num_workers=4) + mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=1) + + print('Generated Dataset Loading Completed!!!') + + return motion_loader, mm_motion_loader \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/networks/__init__.py b/motion_diffusion_model/data_loaders/humanml/networks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/motion_diffusion_model/data_loaders/humanml/networks/evaluator_wrapper.py b/motion_diffusion_model/data_loaders/humanml/networks/evaluator_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..87f9d569c67ad00cd265654c03afc38f152029cb --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/networks/evaluator_wrapper.py @@ -0,0 +1,187 @@ +from data_loaders.humanml.networks.modules import * +from data_loaders.humanml.utils.word_vectorizer import POS_enumerator +from os.path import join as pjoin + +def build_models(opt): + movement_enc = MovementConvEncoder(opt.dim_pose-4, opt.dim_movement_enc_hidden, opt.dim_movement_latent) + text_enc = TextEncoderBiGRUCo(word_size=opt.dim_word, + pos_size=opt.dim_pos_ohot, + hidden_size=opt.dim_text_hidden, + output_size=opt.dim_coemb_hidden, + device=opt.device) + + motion_enc = MotionEncoderBiGRUCo(input_size=opt.dim_movement_latent, + hidden_size=opt.dim_motion_hidden, + output_size=opt.dim_coemb_hidden, + device=opt.device) + + checkpoint = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'text_mot_match', 'model', 'finest.tar'), + map_location=opt.device) + movement_enc.load_state_dict(checkpoint['movement_encoder']) + text_enc.load_state_dict(checkpoint['text_encoder']) + motion_enc.load_state_dict(checkpoint['motion_encoder']) + print('Loading Evaluation Model Wrapper (Epoch %d) Completed!!' 
% (checkpoint['epoch'])) + return text_enc, motion_enc, movement_enc + + +class EvaluatorModelWrapper(object): + + def __init__(self, opt): + + if opt.dataset_name == 't2m': + opt.dim_pose = 263 + elif opt.dataset_name == 'kit': + opt.dim_pose = 251 + else: + raise KeyError('Dataset not Recognized!!!') + + opt.dim_word = 300 + opt.max_motion_length = 196 + opt.dim_pos_ohot = len(POS_enumerator) + opt.dim_motion_hidden = 1024 + opt.max_text_len = 20 + opt.dim_text_hidden = 512 + opt.dim_coemb_hidden = 512 + + self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt) + self.opt = opt + self.device = opt.device + + self.text_encoder.to(opt.device) + self.motion_encoder.to(opt.device) + self.movement_encoder.to(opt.device) + + self.text_encoder.eval() + self.motion_encoder.eval() + self.movement_encoder.eval() + + # Please note that the results does not following the order of inputs + def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens): + with torch.no_grad(): + word_embs = word_embs.detach().to(self.device).float() + pos_ohot = pos_ohot.detach().to(self.device).float() + motions = motions.detach().to(self.device).float() + + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + m_lens = m_lens[align_idx] + + '''Movement Encoding''' + movements = self.movement_encoder(motions[..., :-4]).detach() + m_lens = m_lens // self.opt.unit_length + motion_embedding = self.motion_encoder(movements, m_lens) + + '''Text Encoding''' + text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens) + text_embedding = text_embedding[align_idx] + return text_embedding, motion_embedding + + # Please note that the results does not following the order of inputs + def get_motion_embeddings(self, motions, m_lens): + with torch.no_grad(): + motions = motions.detach().to(self.device).float() + + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + m_lens = m_lens[align_idx] + + '''Movement Encoding''' + movements = self.movement_encoder(motions[..., :-4]).detach() + m_lens = m_lens // self.opt.unit_length + motion_embedding = self.motion_encoder(movements, m_lens) + return motion_embedding + +# our version +def build_evaluators(opt): + movement_enc = MovementConvEncoder(opt['dim_pose']-4, opt['dim_movement_enc_hidden'], opt['dim_movement_latent']) + text_enc = TextEncoderBiGRUCo(word_size=opt['dim_word'], + pos_size=opt['dim_pos_ohot'], + hidden_size=opt['dim_text_hidden'], + output_size=opt['dim_coemb_hidden'], + device=opt['device']) + + motion_enc = MotionEncoderBiGRUCo(input_size=opt['dim_movement_latent'], + hidden_size=opt['dim_motion_hidden'], + output_size=opt['dim_coemb_hidden'], + device=opt['device']) + + ckpt_dir = opt['dataset_name'] + if opt['dataset_name'] == 'humanml': + ckpt_dir = 't2m' + + checkpoint = torch.load(pjoin(opt['checkpoints_dir'], ckpt_dir, 'text_mot_match', 'model', 'finest.tar'), + map_location=opt['device']) + movement_enc.load_state_dict(checkpoint['movement_encoder']) + text_enc.load_state_dict(checkpoint['text_encoder']) + motion_enc.load_state_dict(checkpoint['motion_encoder']) + print('Loading Evaluation Model Wrapper (Epoch %d) Completed!!' 
% (checkpoint['epoch'])) + return text_enc, motion_enc, movement_enc + +# our wrapper +class EvaluatorMDMWrapper(object): + + def __init__(self, dataset_name, device): + opt = { + 'dataset_name': dataset_name, + 'device': device, + 'dim_word': 300, + 'max_motion_length': 196, + 'dim_pos_ohot': len(POS_enumerator), + 'dim_motion_hidden': 1024, + 'max_text_len': 20, + 'dim_text_hidden': 512, + 'dim_coemb_hidden': 512, + 'dim_pose': 263 if dataset_name == 'humanml' else 251, + 'dim_movement_enc_hidden': 512, + 'dim_movement_latent': 512, + 'checkpoints_dir': '.', + 'unit_length': 4, + } + + self.text_encoder, self.motion_encoder, self.movement_encoder = build_evaluators(opt) + self.opt = opt + self.device = opt['device'] + + self.text_encoder.to(opt['device']) + self.motion_encoder.to(opt['device']) + self.movement_encoder.to(opt['device']) + + self.text_encoder.eval() + self.motion_encoder.eval() + self.movement_encoder.eval() + + # Please note that the results does not following the order of inputs + def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens): + with torch.no_grad(): + word_embs = word_embs.detach().to(self.device).float() + pos_ohot = pos_ohot.detach().to(self.device).float() + motions = motions.detach().to(self.device).float() + + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + m_lens = m_lens[align_idx] + + '''Movement Encoding''' + movements = self.movement_encoder(motions[..., :-4]).detach() + m_lens = m_lens // self.opt['unit_length'] + motion_embedding = self.motion_encoder(movements, m_lens) + + '''Text Encoding''' + text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens) + text_embedding = text_embedding[align_idx] + return text_embedding, motion_embedding + + # Please note that the results does not following the order of inputs + def get_motion_embeddings(self, motions, m_lens): + with torch.no_grad(): + motions = motions.detach().to(self.device).float() + + align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + motions = motions[align_idx] + m_lens = m_lens[align_idx] + + '''Movement Encoding''' + movements = self.movement_encoder(motions[..., :-4]).detach() + m_lens = m_lens // self.opt['unit_length'] + motion_embedding = self.motion_encoder(movements, m_lens) + return motion_embedding \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/networks/modules.py b/motion_diffusion_model/data_loaders/humanml/networks/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..3177738d3f029a65fb4b26538d607d95fb1c84b7 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/networks/modules.py @@ -0,0 +1,438 @@ +import torch +import torch.nn as nn +import numpy as np +import time +import math +from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence +# from networks.layers import * +import torch.nn.functional as F + + +class ContrastiveLoss(torch.nn.Module): + """ + Contrastive loss function. 
+ Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf + """ + def __init__(self, margin=3.0): + super(ContrastiveLoss, self).__init__() + self.margin = margin + + def forward(self, output1, output2, label): + euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True) + loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) + + (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2)) + return loss_contrastive + + +def init_weight(m): + if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose1d): + nn.init.xavier_normal_(m.weight) + # m.bias.data.fill_(0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + +def reparameterize(mu, logvar): + s_var = logvar.mul(0.5).exp_() + eps = s_var.data.new(s_var.size()).normal_() + return eps.mul(s_var).add_(mu) + + +# batch_size, dimension and position +# output: (batch_size, dim) +def positional_encoding(batch_size, dim, pos): + assert batch_size == pos.shape[0] + positions_enc = np.array([ + [pos[j] / np.power(10000, (i-i%2)/dim) for i in range(dim)] + for j in range(batch_size) + ], dtype=np.float32) + positions_enc[:, 0::2] = np.sin(positions_enc[:, 0::2]) + positions_enc[:, 1::2] = np.cos(positions_enc[:, 1::2]) + return torch.from_numpy(positions_enc).float() + + +def get_padding_mask(batch_size, seq_len, cap_lens): + cap_lens = cap_lens.data.tolist() + mask_2d = torch.ones((batch_size, seq_len, seq_len), dtype=torch.float32) + for i, cap_len in enumerate(cap_lens): + mask_2d[i, :, :cap_len] = 0 + return mask_2d.bool(), 1 - mask_2d[:, :, 0].clone() + + +class PositionalEncoding(nn.Module): + + def __init__(self, d_model, max_len=300): + super(PositionalEncoding, self).__init__() + + pe = torch.zeros(max_len, d_model) + position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) + div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + # pe = pe.unsqueeze(0).transpose(0, 1) + self.register_buffer('pe', pe) + + def forward(self, pos): + return self.pe[pos] + + +class MovementConvEncoder(nn.Module): + def __init__(self, input_size, hidden_size, output_size): + super(MovementConvEncoder, self).__init__() + self.main = nn.Sequential( + nn.Conv1d(input_size, hidden_size, 4, 2, 1), + nn.Dropout(0.2, inplace=True), + nn.LeakyReLU(0.2, inplace=True), + nn.Conv1d(hidden_size, output_size, 4, 2, 1), + nn.Dropout(0.2, inplace=True), + nn.LeakyReLU(0.2, inplace=True), + ) + self.out_net = nn.Linear(output_size, output_size) + self.main.apply(init_weight) + self.out_net.apply(init_weight) + + def forward(self, inputs): + inputs = inputs.permute(0, 2, 1) + outputs = self.main(inputs).permute(0, 2, 1) + # print(outputs.shape) + return self.out_net(outputs) + + +class MovementConvDecoder(nn.Module): + def __init__(self, input_size, hidden_size, output_size): + super(MovementConvDecoder, self).__init__() + self.main = nn.Sequential( + nn.ConvTranspose1d(input_size, hidden_size, 4, 2, 1), + # nn.Dropout(0.2, inplace=True), + nn.LeakyReLU(0.2, inplace=True), + nn.ConvTranspose1d(hidden_size, output_size, 4, 2, 1), + # nn.Dropout(0.2, inplace=True), + nn.LeakyReLU(0.2, inplace=True), + ) + self.out_net = nn.Linear(output_size, output_size) + + self.main.apply(init_weight) + self.out_net.apply(init_weight) + + def forward(self, inputs): + inputs = inputs.permute(0, 2, 1) + outputs = 
self.main(inputs).permute(0, 2, 1) + return self.out_net(outputs) + + +class TextVAEDecoder(nn.Module): + def __init__(self, text_size, input_size, output_size, hidden_size, n_layers): + super(TextVAEDecoder, self).__init__() + self.input_size = input_size + self.output_size = output_size + self.hidden_size = hidden_size + self.n_layers = n_layers + self.emb = nn.Sequential( + nn.Linear(input_size, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True)) + + self.z2init = nn.Linear(text_size, hidden_size * n_layers) + self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)]) + self.positional_encoder = PositionalEncoding(hidden_size) + + + self.output = nn.Sequential( + nn.Linear(hidden_size, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(hidden_size, output_size) + ) + + # + # self.output = nn.Sequential( + # nn.Linear(hidden_size, hidden_size), + # nn.LayerNorm(hidden_size), + # nn.LeakyReLU(0.2, inplace=True), + # nn.Linear(hidden_size, output_size-4) + # ) + + # self.contact_net = nn.Sequential( + # nn.Linear(output_size-4, 64), + # nn.LayerNorm(64), + # nn.LeakyReLU(0.2, inplace=True), + # nn.Linear(64, 4) + # ) + + self.output.apply(init_weight) + self.emb.apply(init_weight) + self.z2init.apply(init_weight) + # self.contact_net.apply(init_weight) + + def get_init_hidden(self, latent): + hidden = self.z2init(latent) + hidden = torch.split(hidden, self.hidden_size, dim=-1) + return list(hidden) + + def forward(self, inputs, last_pred, hidden, p): + h_in = self.emb(inputs) + pos_enc = self.positional_encoder(p).to(inputs.device).detach() + h_in = h_in + pos_enc + for i in range(self.n_layers): + # print(h_in.shape) + hidden[i] = self.gru[i](h_in, hidden[i]) + h_in = hidden[i] + pose_pred = self.output(h_in) + # pose_pred = self.output(h_in) + last_pred.detach() + # contact = self.contact_net(pose_pred) + # return torch.cat([pose_pred, contact], dim=-1), hidden + return pose_pred, hidden + + +class TextDecoder(nn.Module): + def __init__(self, text_size, input_size, output_size, hidden_size, n_layers): + super(TextDecoder, self).__init__() + self.input_size = input_size + self.output_size = output_size + self.hidden_size = hidden_size + self.n_layers = n_layers + self.emb = nn.Sequential( + nn.Linear(input_size, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True)) + + self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)]) + self.z2init = nn.Linear(text_size, hidden_size * n_layers) + self.positional_encoder = PositionalEncoding(hidden_size) + + self.mu_net = nn.Linear(hidden_size, output_size) + self.logvar_net = nn.Linear(hidden_size, output_size) + + self.emb.apply(init_weight) + self.z2init.apply(init_weight) + self.mu_net.apply(init_weight) + self.logvar_net.apply(init_weight) + + def get_init_hidden(self, latent): + + hidden = self.z2init(latent) + hidden = torch.split(hidden, self.hidden_size, dim=-1) + + return list(hidden) + + def forward(self, inputs, hidden, p): + # print(inputs.shape) + x_in = self.emb(inputs) + pos_enc = self.positional_encoder(p).to(inputs.device).detach() + x_in = x_in + pos_enc + + for i in range(self.n_layers): + hidden[i] = self.gru[i](x_in, hidden[i]) + h_in = hidden[i] + mu = self.mu_net(h_in) + logvar = self.logvar_net(h_in) + z = reparameterize(mu, logvar) + return z, mu, logvar, hidden + +class AttLayer(nn.Module): + def __init__(self, query_dim, key_dim, value_dim): + super(AttLayer, 
self).__init__() + self.W_q = nn.Linear(query_dim, value_dim) + self.W_k = nn.Linear(key_dim, value_dim, bias=False) + self.W_v = nn.Linear(key_dim, value_dim) + + self.softmax = nn.Softmax(dim=1) + self.dim = value_dim + + self.W_q.apply(init_weight) + self.W_k.apply(init_weight) + self.W_v.apply(init_weight) + + def forward(self, query, key_mat): + ''' + query (batch, query_dim) + key (batch, seq_len, key_dim) + ''' + # print(query.shape) + query_vec = self.W_q(query).unsqueeze(-1) # (batch, value_dim, 1) + val_set = self.W_v(key_mat) # (batch, seq_len, value_dim) + key_set = self.W_k(key_mat) # (batch, seq_len, value_dim) + + weights = torch.matmul(key_set, query_vec) / np.sqrt(self.dim) + + co_weights = self.softmax(weights) # (batch, seq_len, 1) + values = val_set * co_weights # (batch, seq_len, value_dim) + pred = values.sum(dim=1) # (batch, value_dim) + return pred, co_weights + + def short_cut(self, querys, keys): + return self.W_q(querys), self.W_k(keys) + + +class TextEncoderBiGRU(nn.Module): + def __init__(self, word_size, pos_size, hidden_size, device): + super(TextEncoderBiGRU, self).__init__() + self.device = device + + self.pos_emb = nn.Linear(pos_size, word_size) + self.input_emb = nn.Linear(word_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) + # self.linear2 = nn.Linear(hidden_size, output_size) + + self.input_emb.apply(init_weight) + self.pos_emb.apply(init_weight) + # self.linear2.apply(init_weight) + # self.batch_size = batch_size + self.hidden_size = hidden_size + self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) + + # input(batch_size, seq_len, dim) + def forward(self, word_embs, pos_onehot, cap_lens): + num_samples = word_embs.shape[0] + + pos_embs = self.pos_emb(pos_onehot) + inputs = word_embs + pos_embs + input_embs = self.input_emb(inputs) + hidden = self.hidden.repeat(1, num_samples, 1) + + cap_lens = cap_lens.data.tolist() + emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) + + gru_seq, gru_last = self.gru(emb, hidden) + + gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) + gru_seq = pad_packed_sequence(gru_seq, batch_first=True)[0] + forward_seq = gru_seq[..., :self.hidden_size] + backward_seq = gru_seq[..., self.hidden_size:].clone() + + # Concate the forward and backward word embeddings + for i, length in enumerate(cap_lens): + backward_seq[i:i+1, :length] = torch.flip(backward_seq[i:i+1, :length].clone(), dims=[1]) + gru_seq = torch.cat([forward_seq, backward_seq], dim=-1) + + return gru_seq, gru_last + + +class TextEncoderBiGRUCo(nn.Module): + def __init__(self, word_size, pos_size, hidden_size, output_size, device): + super(TextEncoderBiGRUCo, self).__init__() + self.device = device + + self.pos_emb = nn.Linear(pos_size, word_size) + self.input_emb = nn.Linear(word_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) + self.output_net = nn.Sequential( + nn.Linear(hidden_size * 2, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(hidden_size, output_size) + ) + + self.input_emb.apply(init_weight) + self.pos_emb.apply(init_weight) + self.output_net.apply(init_weight) + # self.linear2.apply(init_weight) + # self.batch_size = batch_size + self.hidden_size = hidden_size + self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) + + # input(batch_size, seq_len, dim) + def forward(self, word_embs, pos_onehot, cap_lens): + 
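+        # Adds learned POS embeddings to the GloVe word embeddings, packs the sequence by
+        # caption length, runs a bidirectional GRU, and projects the concatenated last
+        # hidden states into the co-embedding space shared with MotionEncoderBiGRUCo.
+        # Note: pack_padded_sequence expects cap_lens in descending order, which the
+        # evaluation collate/sorting code enforces upstream.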
num_samples = word_embs.shape[0] + + pos_embs = self.pos_emb(pos_onehot) + inputs = word_embs + pos_embs + input_embs = self.input_emb(inputs) + hidden = self.hidden.repeat(1, num_samples, 1) + + cap_lens = cap_lens.data.tolist() + emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) + + gru_seq, gru_last = self.gru(emb, hidden) + + gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) + + return self.output_net(gru_last) + + +class MotionEncoderBiGRUCo(nn.Module): + def __init__(self, input_size, hidden_size, output_size, device): + super(MotionEncoderBiGRUCo, self).__init__() + self.device = device + + self.input_emb = nn.Linear(input_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) + self.output_net = nn.Sequential( + nn.Linear(hidden_size*2, hidden_size), + nn.LayerNorm(hidden_size), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(hidden_size, output_size) + ) + + self.input_emb.apply(init_weight) + self.output_net.apply(init_weight) + self.hidden_size = hidden_size + self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) + + # input(batch_size, seq_len, dim) + def forward(self, inputs, m_lens): + num_samples = inputs.shape[0] + + input_embs = self.input_emb(inputs) + hidden = self.hidden.repeat(1, num_samples, 1) + + cap_lens = m_lens.data.tolist() + emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) + + gru_seq, gru_last = self.gru(emb, hidden) + + gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) + + return self.output_net(gru_last) + + +class MotionLenEstimatorBiGRU(nn.Module): + def __init__(self, word_size, pos_size, hidden_size, output_size): + super(MotionLenEstimatorBiGRU, self).__init__() + + self.pos_emb = nn.Linear(pos_size, word_size) + self.input_emb = nn.Linear(word_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True) + nd = 512 + self.output = nn.Sequential( + nn.Linear(hidden_size*2, nd), + nn.LayerNorm(nd), + nn.LeakyReLU(0.2, inplace=True), + + nn.Linear(nd, nd // 2), + nn.LayerNorm(nd // 2), + nn.LeakyReLU(0.2, inplace=True), + + nn.Linear(nd // 2, nd // 4), + nn.LayerNorm(nd // 4), + nn.LeakyReLU(0.2, inplace=True), + + nn.Linear(nd // 4, output_size) + ) + # self.linear2 = nn.Linear(hidden_size, output_size) + + self.input_emb.apply(init_weight) + self.pos_emb.apply(init_weight) + self.output.apply(init_weight) + # self.linear2.apply(init_weight) + # self.batch_size = batch_size + self.hidden_size = hidden_size + self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True)) + + # input(batch_size, seq_len, dim) + def forward(self, word_embs, pos_onehot, cap_lens): + num_samples = word_embs.shape[0] + + pos_embs = self.pos_emb(pos_onehot) + inputs = word_embs + pos_embs + input_embs = self.input_emb(inputs) + hidden = self.hidden.repeat(1, num_samples, 1) + + cap_lens = cap_lens.data.tolist() + emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True) + + gru_seq, gru_last = self.gru(emb, hidden) + + gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1) + + return self.output(gru_last) diff --git a/motion_diffusion_model/data_loaders/humanml/networks/trainers.py b/motion_diffusion_model/data_loaders/humanml/networks/trainers.py new file mode 100644 index 0000000000000000000000000000000000000000..123f497a7893cc85d915fde63add9338738f1d03 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/networks/trainers.py @@ -0,0 +1,1089 @@ +import torch +import 
torch.nn.functional as F +import random +from data_loaders.humanml.networks.modules import * +from torch.utils.data import DataLoader +import torch.optim as optim +from torch.nn.utils import clip_grad_norm_ +# import tensorflow as tf +from collections import OrderedDict +from data_loaders.humanml.utils.utils import * +from os.path import join as pjoin +from data_loaders.humanml.data.dataset import collate_fn +import codecs as cs + + +class Logger(object): + def __init__(self, log_dir): + self.writer = tf.summary.create_file_writer(log_dir) + + def scalar_summary(self, tag, value, step): + with self.writer.as_default(): + tf.summary.scalar(tag, value, step=step) + self.writer.flush() + +class DecompTrainerV3(object): + def __init__(self, args, movement_enc, movement_dec): + self.opt = args + self.movement_enc = movement_enc + self.movement_dec = movement_dec + self.device = args.device + + if args.is_train: + self.logger = Logger(args.log_dir) + self.sml1_criterion = torch.nn.SmoothL1Loss() + self.l1_criterion = torch.nn.L1Loss() + self.mse_criterion = torch.nn.MSELoss() + + + @staticmethod + def zero_grad(opt_list): + for opt in opt_list: + opt.zero_grad() + + @staticmethod + def clip_norm(network_list): + for network in network_list: + clip_grad_norm_(network.parameters(), 0.5) + + @staticmethod + def step(opt_list): + for opt in opt_list: + opt.step() + + def forward(self, batch_data): + motions = batch_data + self.motions = motions.detach().to(self.device).float() + self.latents = self.movement_enc(self.motions[..., :-4]) + self.recon_motions = self.movement_dec(self.latents) + + def backward(self): + self.loss_rec = self.l1_criterion(self.recon_motions, self.motions) + # self.sml1_criterion(self.recon_motions[:, 1:] - self.recon_motions[:, :-1], + # self.motions[:, 1:] - self.recon_motions[:, :-1]) + self.loss_sparsity = torch.mean(torch.abs(self.latents)) + self.loss_smooth = self.l1_criterion(self.latents[:, 1:], self.latents[:, :-1]) + self.loss = self.loss_rec + self.loss_sparsity * self.opt.lambda_sparsity +\ + self.loss_smooth*self.opt.lambda_smooth + + def update(self): + # time0 = time.time() + self.zero_grad([self.opt_movement_enc, self.opt_movement_dec]) + # time1 = time.time() + # print('\t Zero_grad Time: %.5f s' % (time1 - time0)) + self.backward() + # time2 = time.time() + # print('\t Backward Time: %.5f s' % (time2 - time1)) + self.loss.backward() + # time3 = time.time() + # print('\t Loss backward Time: %.5f s' % (time3 - time2)) + # self.clip_norm([self.movement_enc, self.movement_dec]) + # time4 = time.time() + # print('\t Clip_norm Time: %.5f s' % (time4 - time3)) + self.step([self.opt_movement_enc, self.opt_movement_dec]) + # time5 = time.time() + # print('\t Step Time: %.5f s' % (time5 - time4)) + + loss_logs = OrderedDict({}) + loss_logs['loss'] = self.loss_rec.item() + loss_logs['loss_rec'] = self.loss_rec.item() + loss_logs['loss_sparsity'] = self.loss_sparsity.item() + loss_logs['loss_smooth'] = self.loss_smooth.item() + return loss_logs + + def save(self, file_name, ep, total_it): + state = { + 'movement_enc': self.movement_enc.state_dict(), + 'movement_dec': self.movement_dec.state_dict(), + + 'opt_movement_enc': self.opt_movement_enc.state_dict(), + 'opt_movement_dec': self.opt_movement_dec.state_dict(), + + 'ep': ep, + 'total_it': total_it, + } + torch.save(state, file_name) + return + + def resume(self, model_dir): + checkpoint = torch.load(model_dir, map_location=self.device) + + self.movement_dec.load_state_dict(checkpoint['movement_dec']) + 
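+        # Restore the movement encoder and both Adam optimizer states as well, so that
+        # training resumes from the saved epoch and iteration counters returned below.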
self.movement_enc.load_state_dict(checkpoint['movement_enc']) + + self.opt_movement_enc.load_state_dict(checkpoint['opt_movement_enc']) + self.opt_movement_dec.load_state_dict(checkpoint['opt_movement_dec']) + + return checkpoint['ep'], checkpoint['total_it'] + + def train(self, train_dataloader, val_dataloader, plot_eval): + self.movement_enc.to(self.device) + self.movement_dec.to(self.device) + + self.opt_movement_enc = optim.Adam(self.movement_enc.parameters(), lr=self.opt.lr) + self.opt_movement_dec = optim.Adam(self.movement_dec.parameters(), lr=self.opt.lr) + + epoch = 0 + it = 0 + + if self.opt.is_continue: + model_dir = pjoin(self.opt.model_dir, 'latest.tar') + epoch, it = self.resume(model_dir) + + start_time = time.time() + total_iters = self.opt.max_epoch * len(train_dataloader) + print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_dataloader), len(val_dataloader))) + val_loss = 0 + logs = OrderedDict() + while epoch < self.opt.max_epoch: + # time0 = time.time() + for i, batch_data in enumerate(train_dataloader): + self.movement_dec.train() + self.movement_enc.train() + + # time1 = time.time() + # print('DataLoader Time: %.5f s'%(time1-time0) ) + self.forward(batch_data) + # time2 = time.time() + # print('Forward Time: %.5f s'%(time2-time1)) + log_dict = self.update() + # time3 = time.time() + # print('Update Time: %.5f s' % (time3 - time2)) + # time0 = time3 + for k, v in log_dict.items(): + if k not in logs: + logs[k] = v + else: + logs[k] += v + + it += 1 + if it % self.opt.log_every == 0: + mean_loss = OrderedDict({'val_loss': val_loss}) + self.logger.scalar_summary('val_loss', val_loss, it) + + for tag, value in logs.items(): + self.logger.scalar_summary(tag, value / self.opt.log_every, it) + mean_loss[tag] = value / self.opt.log_every + logs = OrderedDict() + print_current_loss_decomp(start_time, it, total_iters, mean_loss, epoch, i) + + if it % self.opt.save_latest == 0: + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it) + + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it) + + epoch += 1 + if epoch % self.opt.save_every_e == 0: + self.save(pjoin(self.opt.model_dir, 'E%04d.tar' % (epoch)), epoch, total_it=it) + + print('Validation time:') + + val_loss = 0 + val_rec_loss = 0 + val_sparcity_loss = 0 + val_smooth_loss = 0 + with torch.no_grad(): + for i, batch_data in enumerate(val_dataloader): + self.forward(batch_data) + self.backward() + val_rec_loss += self.loss_rec.item() + val_smooth_loss += self.loss.item() + val_sparcity_loss += self.loss_sparsity.item() + val_smooth_loss += self.loss_smooth.item() + val_loss += self.loss.item() + + val_loss = val_loss / (len(val_dataloader) + 1) + val_rec_loss = val_rec_loss / (len(val_dataloader) + 1) + val_sparcity_loss = val_sparcity_loss / (len(val_dataloader) + 1) + val_smooth_loss = val_smooth_loss / (len(val_dataloader) + 1) + print('Validation Loss: %.5f Reconstruction Loss: %.5f ' + 'Sparsity Loss: %.5f Smooth Loss: %.5f' % (val_loss, val_rec_loss, val_sparcity_loss, \ + val_smooth_loss)) + + if epoch % self.opt.eval_every_e == 0: + data = torch.cat([self.recon_motions[:4], self.motions[:4]], dim=0).detach().cpu().numpy() + save_dir = pjoin(self.opt.eval_dir, 'E%04d' % (epoch)) + os.makedirs(save_dir, exist_ok=True) + plot_eval(data, save_dir) + + +# VAE Sequence Decoder/Prior/Posterior latent by latent +class CompTrainerV6(object): + + def __init__(self, args, text_enc, seq_pri, seq_dec, att_layer, mov_dec, mov_enc=None, seq_post=None): + self.opt = args + self.text_enc = 
text_enc + self.seq_pri = seq_pri + self.att_layer = att_layer + self.device = args.device + self.seq_dec = seq_dec + self.mov_dec = mov_dec + self.mov_enc = mov_enc + + if args.is_train: + self.seq_post = seq_post + # self.motion_dis + self.logger = Logger(args.log_dir) + self.l1_criterion = torch.nn.SmoothL1Loss() + self.gan_criterion = torch.nn.BCEWithLogitsLoss() + self.mse_criterion = torch.nn.MSELoss() + + @staticmethod + def reparametrize(mu, logvar): + s_var = logvar.mul(0.5).exp_() + eps = s_var.data.new(s_var.size()).normal_() + return eps.mul(s_var).add_(mu) + + @staticmethod + def ones_like(tensor, val=1.): + return torch.FloatTensor(tensor.size()).fill_(val).to(tensor.device).requires_grad_(False) + + @staticmethod + def zeros_like(tensor, val=0.): + return torch.FloatTensor(tensor.size()).fill_(val).to(tensor.device).requires_grad_(False) + + @staticmethod + def zero_grad(opt_list): + for opt in opt_list: + opt.zero_grad() + + @staticmethod + def clip_norm(network_list): + for network in network_list: + clip_grad_norm_(network.parameters(), 0.5) + + @staticmethod + def step(opt_list): + for opt in opt_list: + opt.step() + + @staticmethod + def kl_criterion(mu1, logvar1, mu2, logvar2): + # KL( N(mu1, sigma2_1) || N(mu_2, sigma2_2)) + # loss = log(sigma2/sigma1) + (sigma1^2 + (mu1 - mu2)^2)/(2*sigma2^2) - 1/2 + sigma1 = logvar1.mul(0.5).exp() + sigma2 = logvar2.mul(0.5).exp() + kld = torch.log(sigma2 / sigma1) + (torch.exp(logvar1) + (mu1 - mu2) ** 2) / ( + 2 * torch.exp(logvar2)) - 1 / 2 + return kld.sum() / mu1.shape[0] + + @staticmethod + def kl_criterion_unit(mu, logvar): + # KL( N(mu1, sigma2_1) || N(mu_2, sigma2_2)) + # loss = log(sigma2/sigma1) + (sigma1^2 + (mu1 - mu2)^2)/(2*sigma2^2) - 1/2 + kld = ((torch.exp(logvar) + mu ** 2) - logvar - 1) / 2 + return kld.sum() / mu.shape[0] + + def forward(self, batch_data, tf_ratio, mov_len, eval_mode=False): + word_emb, pos_ohot, caption, cap_lens, motions, m_lens = batch_data + word_emb = word_emb.detach().to(self.device).float() + pos_ohot = pos_ohot.detach().to(self.device).float() + motions = motions.detach().to(self.device).float() + self.cap_lens = cap_lens + self.caption = caption + + # print(motions.shape) + # (batch_size, motion_len, pose_dim) + self.motions = motions + + '''Movement Encoding''' + self.movements = self.mov_enc(self.motions[..., :-4]).detach() + # Initially input a mean vector + mov_in = self.mov_enc( + torch.zeros((self.motions.shape[0], self.opt.unit_length, self.motions.shape[-1] - 4), device=self.device) + ).squeeze(1).detach() + assert self.movements.shape[1] == mov_len + + teacher_force = True if random.random() < tf_ratio else False + + '''Text Encoding''' + # time0 = time.time() + # text_input = torch.cat([word_emb, pos_ohot], dim=-1) + word_hids, hidden = self.text_enc(word_emb, pos_ohot, cap_lens) + # print(word_hids.shape, hidden.shape) + + if self.opt.text_enc_mod == 'bigru': + hidden_pos = self.seq_post.get_init_hidden(hidden) + hidden_pri = self.seq_pri.get_init_hidden(hidden) + hidden_dec = self.seq_dec.get_init_hidden(hidden) + elif self.opt.text_enc_mod == 'transformer': + hidden_pos = self.seq_post.get_init_hidden(hidden.detach()) + hidden_pri = self.seq_pri.get_init_hidden(hidden.detach()) + hidden_dec = self.seq_dec.get_init_hidden(hidden) + + mus_pri = [] + logvars_pri = [] + mus_post = [] + logvars_post = [] + fake_mov_batch = [] + + query_input = [] + + # time1 = time.time() + # print("\t Text Encoder Cost:%5f" % (time1 - time0)) + # print(self.movements.shape) + + for i in 
range(mov_len): + # print("\t Sequence Measure") + # print(mov_in.shape) + mov_tgt = self.movements[:, i] + '''Local Attention Vector''' + att_vec, _ = self.att_layer(hidden_dec[-1], word_hids) + query_input.append(hidden_dec[-1]) + + tta = m_lens // self.opt.unit_length - i + + if self.opt.text_enc_mod == 'bigru': + pos_in = torch.cat([mov_in, mov_tgt, att_vec], dim=-1) + pri_in = torch.cat([mov_in, att_vec], dim=-1) + + elif self.opt.text_enc_mod == 'transformer': + pos_in = torch.cat([mov_in, mov_tgt, att_vec.detach()], dim=-1) + pri_in = torch.cat([mov_in, att_vec.detach()], dim=-1) + + '''Posterior''' + z_pos, mu_pos, logvar_pos, hidden_pos = self.seq_post(pos_in, hidden_pos, tta) + + '''Prior''' + z_pri, mu_pri, logvar_pri, hidden_pri = self.seq_pri(pri_in, hidden_pri, tta) + + '''Decoder''' + if eval_mode: + dec_in = torch.cat([mov_in, att_vec, z_pri], dim=-1) + else: + dec_in = torch.cat([mov_in, att_vec, z_pos], dim=-1) + fake_mov, hidden_dec = self.seq_dec(dec_in, mov_in, hidden_dec, tta) + + # print(fake_mov.shape) + + mus_post.append(mu_pos) + logvars_post.append(logvar_pos) + mus_pri.append(mu_pri) + logvars_pri.append(logvar_pri) + fake_mov_batch.append(fake_mov.unsqueeze(1)) + + if teacher_force: + mov_in = self.movements[:, i].detach() + else: + mov_in = fake_mov.detach() + + + self.fake_movements = torch.cat(fake_mov_batch, dim=1) + + # print(self.fake_movements.shape) + + self.fake_motions = self.mov_dec(self.fake_movements) + + self.mus_post = torch.cat(mus_post, dim=0) + self.mus_pri = torch.cat(mus_pri, dim=0) + self.logvars_post = torch.cat(logvars_post, dim=0) + self.logvars_pri = torch.cat(logvars_pri, dim=0) + + def generate(self, word_emb, pos_ohot, cap_lens, m_lens, mov_len, dim_pose): + word_emb = word_emb.detach().to(self.device).float() + pos_ohot = pos_ohot.detach().to(self.device).float() + self.cap_lens = cap_lens + + # print(motions.shape) + # (batch_size, motion_len, pose_dim) + + '''Movement Encoding''' + # Initially input a mean vector + mov_in = self.mov_enc( + torch.zeros((word_emb.shape[0], self.opt.unit_length, dim_pose - 4), device=self.device) + ).squeeze(1).detach() + + '''Text Encoding''' + # time0 = time.time() + # text_input = torch.cat([word_emb, pos_ohot], dim=-1) + word_hids, hidden = self.text_enc(word_emb, pos_ohot, cap_lens) + # print(word_hids.shape, hidden.shape) + + hidden_pri = self.seq_pri.get_init_hidden(hidden) + hidden_dec = self.seq_dec.get_init_hidden(hidden) + + mus_pri = [] + logvars_pri = [] + fake_mov_batch = [] + att_wgt = [] + + # time1 = time.time() + # print("\t Text Encoder Cost:%5f" % (time1 - time0)) + # print(self.movements.shape) + + for i in range(mov_len): + # print("\t Sequence Measure") + # print(mov_in.shape) + '''Local Attention Vector''' + att_vec, co_weights = self.att_layer(hidden_dec[-1], word_hids) + + tta = m_lens // self.opt.unit_length - i + # tta = m_lens - i + + '''Prior''' + pri_in = torch.cat([mov_in, att_vec], dim=-1) + z_pri, mu_pri, logvar_pri, hidden_pri = self.seq_pri(pri_in, hidden_pri, tta) + + '''Decoder''' + dec_in = torch.cat([mov_in, att_vec, z_pri], dim=-1) + + fake_mov, hidden_dec = self.seq_dec(dec_in, mov_in, hidden_dec, tta) + + # print(fake_mov.shape) + mus_pri.append(mu_pri) + logvars_pri.append(logvar_pri) + fake_mov_batch.append(fake_mov.unsqueeze(1)) + att_wgt.append(co_weights) + + mov_in = fake_mov.detach() + + fake_movements = torch.cat(fake_mov_batch, dim=1) + att_wgts = torch.cat(att_wgt, dim=-1) + + # print(self.fake_movements.shape) + + fake_motions = 
self.mov_dec(fake_movements) + + mus_pri = torch.cat(mus_pri, dim=0) + logvars_pri = torch.cat(logvars_pri, dim=0) + + return fake_motions, mus_pri, att_wgts + + def backward_G(self): + self.loss_mot_rec = self.l1_criterion(self.fake_motions, self.motions) + self.loss_mov_rec = self.l1_criterion(self.fake_movements, self.movements) + + self.loss_kld = self.kl_criterion(self.mus_post, self.logvars_post, self.mus_pri, self.logvars_pri) + + self.loss_gen = self.loss_mot_rec * self.opt.lambda_rec_mov + self.loss_mov_rec * self.opt.lambda_rec_mot + \ + self.loss_kld * self.opt.lambda_kld + loss_logs = OrderedDict({}) + loss_logs['loss_gen'] = self.loss_gen.item() + loss_logs['loss_mot_rec'] = self.loss_mot_rec.item() + loss_logs['loss_mov_rec'] = self.loss_mov_rec.item() + loss_logs['loss_kld'] = self.loss_kld.item() + + return loss_logs + # self.loss_gen = self.loss_rec_mov + + # self.loss_gen = self.loss_rec_mov * self.opt.lambda_rec_mov + self.loss_rec_mot + \ + # self.loss_kld * self.opt.lambda_kld + \ + # self.loss_mtgan_G * self.opt.lambda_gan_mt + self.loss_mvgan_G * self.opt.lambda_gan_mv + + + def update(self): + + self.zero_grad([self.opt_text_enc, self.opt_seq_dec, self.opt_seq_post, + self.opt_seq_pri, self.opt_att_layer, self.opt_mov_dec]) + # time2_0 = time.time() + # print("\t\t Zero Grad:%5f" % (time2_0 - time1)) + loss_logs = self.backward_G() + + # time2_1 = time.time() + # print("\t\t Backward_G :%5f" % (time2_1 - time2_0)) + self.loss_gen.backward() + + # time2_2 = time.time() + # print("\t\t Backward :%5f" % (time2_2 - time2_1)) + self.clip_norm([self.text_enc, self.seq_dec, self.seq_post, self.seq_pri, + self.att_layer, self.mov_dec]) + + # time2_3 = time.time() + # print("\t\t Clip Norm :%5f" % (time2_3 - time2_2)) + self.step([self.opt_text_enc, self.opt_seq_dec, self.opt_seq_post, + self.opt_seq_pri, self.opt_att_layer, self.opt_mov_dec]) + + # time2_4 = time.time() + # print("\t\t Step :%5f" % (time2_4 - time2_3)) + + # time2 = time.time() + # print("\t Update Generator Cost:%5f" % (time2 - time1)) + + # self.zero_grad([self.opt_att_layer]) + # self.backward_Att() + # self.loss_lgan_G_.backward() + # self.clip_norm([self.att_layer]) + # self.step([self.opt_att_layer]) + # # time3 = time.time() + # # print("\t Update Att Cost:%5f" % (time3 - time2)) + + # self.loss_gen += self.loss_lgan_G_ + + return loss_logs + + def to(self, device): + if self.opt.is_train: + self.gan_criterion.to(device) + self.mse_criterion.to(device) + self.l1_criterion.to(device) + self.seq_post.to(device) + self.mov_enc.to(device) + self.text_enc.to(device) + self.mov_dec.to(device) + self.seq_pri.to(device) + self.att_layer.to(device) + self.seq_dec.to(device) + + def train_mode(self): + if self.opt.is_train: + self.seq_post.train() + self.mov_enc.eval() + # self.motion_dis.train() + # self.movement_dis.train() + self.mov_dec.train() + self.text_enc.train() + self.seq_pri.train() + self.att_layer.train() + self.seq_dec.train() + + + def eval_mode(self): + if self.opt.is_train: + self.seq_post.eval() + self.mov_enc.eval() + # self.motion_dis.train() + # self.movement_dis.train() + self.mov_dec.eval() + self.text_enc.eval() + self.seq_pri.eval() + self.att_layer.eval() + self.seq_dec.eval() + + + def save(self, file_name, ep, total_it, sub_ep, sl_len): + state = { + # 'latent_dis': self.latent_dis.state_dict(), + # 'motion_dis': self.motion_dis.state_dict(), + 'text_enc': self.text_enc.state_dict(), + 'seq_post': self.seq_post.state_dict(), + 'att_layer': self.att_layer.state_dict(), + 'seq_dec': 
self.seq_dec.state_dict(), + 'seq_pri': self.seq_pri.state_dict(), + 'mov_enc': self.mov_enc.state_dict(), + 'mov_dec': self.mov_dec.state_dict(), + + # 'opt_motion_dis': self.opt_motion_dis.state_dict(), + 'opt_mov_dec': self.opt_mov_dec.state_dict(), + 'opt_text_enc': self.opt_text_enc.state_dict(), + 'opt_seq_pri': self.opt_seq_pri.state_dict(), + 'opt_att_layer': self.opt_att_layer.state_dict(), + 'opt_seq_post': self.opt_seq_post.state_dict(), + 'opt_seq_dec': self.opt_seq_dec.state_dict(), + # 'opt_movement_dis': self.opt_movement_dis.state_dict(), + + 'ep': ep, + 'total_it': total_it, + 'sub_ep': sub_ep, + 'sl_len': sl_len + } + torch.save(state, file_name) + return + + def load(self, model_dir): + checkpoint = torch.load(model_dir, map_location=self.device) + if self.opt.is_train: + self.seq_post.load_state_dict(checkpoint['seq_post']) + # self.opt_latent_dis.load_state_dict(checkpoint['opt_latent_dis']) + + self.opt_text_enc.load_state_dict(checkpoint['opt_text_enc']) + self.opt_seq_post.load_state_dict(checkpoint['opt_seq_post']) + self.opt_att_layer.load_state_dict(checkpoint['opt_att_layer']) + self.opt_seq_pri.load_state_dict(checkpoint['opt_seq_pri']) + self.opt_seq_dec.load_state_dict(checkpoint['opt_seq_dec']) + self.opt_mov_dec.load_state_dict(checkpoint['opt_mov_dec']) + + self.text_enc.load_state_dict(checkpoint['text_enc']) + self.mov_dec.load_state_dict(checkpoint['mov_dec']) + self.seq_pri.load_state_dict(checkpoint['seq_pri']) + self.att_layer.load_state_dict(checkpoint['att_layer']) + self.seq_dec.load_state_dict(checkpoint['seq_dec']) + self.mov_enc.load_state_dict(checkpoint['mov_enc']) + + return checkpoint['ep'], checkpoint['total_it'], checkpoint['sub_ep'], checkpoint['sl_len'] + + def train(self, train_dataset, val_dataset, plot_eval): + self.to(self.device) + + self.opt_text_enc = optim.Adam(self.text_enc.parameters(), lr=self.opt.lr) + self.opt_seq_post = optim.Adam(self.seq_post.parameters(), lr=self.opt.lr) + self.opt_seq_pri = optim.Adam(self.seq_pri.parameters(), lr=self.opt.lr) + self.opt_att_layer = optim.Adam(self.att_layer.parameters(), lr=self.opt.lr) + self.opt_seq_dec = optim.Adam(self.seq_dec.parameters(), lr=self.opt.lr) + + self.opt_mov_dec = optim.Adam(self.mov_dec.parameters(), lr=self.opt.lr*0.1) + + epoch = 0 + it = 0 + if self.opt.dataset_name == 't2m': + schedule_len = 10 + elif self.opt.dataset_name == 'kit': + schedule_len = 6 + sub_ep = 0 + + if self.opt.is_continue: + model_dir = pjoin(self.opt.model_dir, 'latest.tar') + epoch, it, sub_ep, schedule_len = self.load(model_dir) + + invalid = True + start_time = time.time() + val_loss = 0 + is_continue_and_first = self.opt.is_continue + while invalid: + train_dataset.reset_max_len(schedule_len * self.opt.unit_length) + val_dataset.reset_max_len(schedule_len * self.opt.unit_length) + + train_loader = DataLoader(train_dataset, batch_size=self.opt.batch_size, drop_last=True, num_workers=4, + shuffle=True, collate_fn=collate_fn, pin_memory=True) + val_loader = DataLoader(val_dataset, batch_size=self.opt.batch_size, drop_last=True, num_workers=4, + shuffle=True, collate_fn=collate_fn, pin_memory=True) + print("Max_Length:%03d Training Split:%05d Validation Split:%04d" % (schedule_len, len(train_loader), len(val_loader))) + + min_val_loss = np.inf + stop_cnt = 0 + logs = OrderedDict() + for sub_epoch in range(sub_ep, self.opt.max_sub_epoch): + self.train_mode() + + if is_continue_and_first: + sub_ep = 0 + is_continue_and_first = False + + tf_ratio = self.opt.tf_ratio + + time1 = time.time() + 
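+                # --- Editor's note: illustrative sketch, not part of the original training code ---
+                # The loop below implements a curriculum over clip length: every clip fed to
+                # forward() is capped at schedule_len * opt.unit_length frames, and schedule_len
+                # is incremented after each curriculum stage (when validation stops improving or
+                # max_sub_epoch is reached), until it exceeds 49. Assuming opt.unit_length == 4
+                # (consistent with the 49-step cap here and the 196-frame max_motion_length set
+                # in get_opt.py elsewhere in this patch):
+                #
+                #     schedule_len = 10                  # 't2m' starting value ('kit' starts at 6)
+                #     max_frames   = schedule_len * 4    # -> 40 frames per training clip
+                #     ...                                # +4 frames per curriculum step
+                #     schedule_len = 49                  # final stage: 49 * 4 = 196 frames
+                #
+                # Teacher forcing is drawn once per batch inside forward(): with probability
+                # tf_ratio the ground-truth movement snippet self.movements[:, i] is fed back as
+                # the next decoder input, otherwise the decoder consumes its own detached
+                # prediction fake_mov.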
for i, batch_data in enumerate(train_loader): + time2 = time.time() + self.forward(batch_data, tf_ratio, schedule_len) + time3 = time.time() + log_dict = self.update() + for k, v in log_dict.items(): + if k not in logs: + logs[k] = v + else: + logs[k] += v + time4 = time.time() + + + it += 1 + if it % self.opt.log_every == 0: + mean_loss = OrderedDict({'val_loss': val_loss}) + self.logger.scalar_summary('val_loss', val_loss, it) + self.logger.scalar_summary('scheduled_length', schedule_len, it) + + for tag, value in logs.items(): + self.logger.scalar_summary(tag, value/self.opt.log_every, it) + mean_loss[tag] = value / self.opt.log_every + logs = OrderedDict() + print_current_loss(start_time, it, mean_loss, epoch, sub_epoch=sub_epoch, inner_iter=i, + tf_ratio=tf_ratio, sl_steps=schedule_len) + + if it % self.opt.save_latest == 0: + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it, sub_epoch, schedule_len) + + time5 = time.time() + # print("Data Loader Time: %5f s" % ((time2 - time1))) + # print("Forward Time: %5f s" % ((time3 - time2))) + # print("Update Time: %5f s" % ((time4 - time3))) + # print('Per Iteration: %5f s' % ((time5 - time1))) + time1 = time5 + + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it, sub_epoch, schedule_len) + + epoch += 1 + if epoch % self.opt.save_every_e == 0: + self.save(pjoin(self.opt.model_dir, 'E%03d_SE%02d_SL%02d.tar'%(epoch, sub_epoch, schedule_len)), + epoch, total_it=it, sub_ep=sub_epoch, sl_len=schedule_len) + + print('Validation time:') + + loss_mot_rec = 0 + loss_mov_rec = 0 + loss_kld = 0 + val_loss = 0 + with torch.no_grad(): + for i, batch_data in enumerate(val_loader): + self.forward(batch_data, 0, schedule_len) + self.backward_G() + loss_mot_rec += self.loss_mot_rec.item() + loss_mov_rec += self.loss_mov_rec.item() + loss_kld += self.loss_kld.item() + val_loss += self.loss_gen.item() + + loss_mot_rec /= len(val_loader) + 1 + loss_mov_rec /= len(val_loader) + 1 + loss_kld /= len(val_loader) + 1 + val_loss /= len(val_loader) + 1 + print('Validation Loss: %.5f Movement Recon Loss: %.5f Motion Recon Loss: %.5f KLD Loss: %.5f:' % + (val_loss, loss_mov_rec, loss_mot_rec, loss_kld)) + + if epoch % self.opt.eval_every_e == 0: + reco_data = self.fake_motions[:4] + with torch.no_grad(): + self.forward(batch_data, 0, schedule_len, eval_mode=True) + fake_data = self.fake_motions[:4] + gt_data = self.motions[:4] + data = torch.cat([fake_data, reco_data, gt_data], dim=0).cpu().numpy() + captions = self.caption[:4] * 3 + save_dir = pjoin(self.opt.eval_dir, 'E%03d_SE%02d_SL%02d'%(epoch, sub_epoch, schedule_len)) + os.makedirs(save_dir, exist_ok=True) + plot_eval(data, save_dir, captions) + + # if cl_ratio == 1: + if val_loss < min_val_loss: + min_val_loss = val_loss + stop_cnt = 0 + elif stop_cnt < self.opt.early_stop_count: + stop_cnt += 1 + elif stop_cnt >= self.opt.early_stop_count: + break + if val_loss - min_val_loss >= 0.1: + break + + schedule_len += 1 + + if schedule_len > 49: + invalid = False + + +class LengthEstTrainer(object): + + def __init__(self, args, estimator): + self.opt = args + self.estimator = estimator + self.device = args.device + + if args.is_train: + # self.motion_dis + self.logger = Logger(args.log_dir) + self.mul_cls_criterion = torch.nn.CrossEntropyLoss() + + def resume(self, model_dir): + checkpoints = torch.load(model_dir, map_location=self.device) + self.estimator.load_state_dict(checkpoints['estimator']) + self.opt_estimator.load_state_dict(checkpoints['opt_estimator']) + return checkpoints['epoch'], 
checkpoints['iter'] + + def save(self, model_dir, epoch, niter): + state = { + 'estimator': self.estimator.state_dict(), + 'opt_estimator': self.opt_estimator.state_dict(), + 'epoch': epoch, + 'niter': niter, + } + torch.save(state, model_dir) + + @staticmethod + def zero_grad(opt_list): + for opt in opt_list: + opt.zero_grad() + + @staticmethod + def clip_norm(network_list): + for network in network_list: + clip_grad_norm_(network.parameters(), 0.5) + + @staticmethod + def step(opt_list): + for opt in opt_list: + opt.step() + + def train(self, train_dataloader, val_dataloader): + self.estimator.to(self.device) + + self.opt_estimator = optim.Adam(self.estimator.parameters(), lr=self.opt.lr) + + epoch = 0 + it = 0 + + if self.opt.is_continue: + model_dir = pjoin(self.opt.model_dir, 'latest.tar') + epoch, it = self.resume(model_dir) + + start_time = time.time() + total_iters = self.opt.max_epoch * len(train_dataloader) + print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_dataloader), len(val_dataloader))) + val_loss = 0 + min_val_loss = np.inf + logs = OrderedDict({'loss': 0}) + while epoch < self.opt.max_epoch: + # time0 = time.time() + for i, batch_data in enumerate(train_dataloader): + self.estimator.train() + + word_emb, pos_ohot, _, cap_lens, _, m_lens = batch_data + word_emb = word_emb.detach().to(self.device).float() + pos_ohot = pos_ohot.detach().to(self.device).float() + + pred_dis = self.estimator(word_emb, pos_ohot, cap_lens) + + self.zero_grad([self.opt_estimator]) + + gt_labels = m_lens // self.opt.unit_length + gt_labels = gt_labels.long().to(self.device) + # print(gt_labels) + # print(pred_dis) + loss = self.mul_cls_criterion(pred_dis, gt_labels) + + loss.backward() + + self.clip_norm([self.estimator]) + self.step([self.opt_estimator]) + + logs['loss'] += loss.item() + + it += 1 + if it % self.opt.log_every == 0: + mean_loss = OrderedDict({'val_loss': val_loss}) + self.logger.scalar_summary('val_loss', val_loss, it) + + for tag, value in logs.items(): + self.logger.scalar_summary(tag, value / self.opt.log_every, it) + mean_loss[tag] = value / self.opt.log_every + logs = OrderedDict({'loss': 0}) + print_current_loss_decomp(start_time, it, total_iters, mean_loss, epoch, i) + + if it % self.opt.save_latest == 0: + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it) + + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it) + + epoch += 1 + if epoch % self.opt.save_every_e == 0: + self.save(pjoin(self.opt.model_dir, 'E%04d.tar' % (epoch)), epoch, it) + + print('Validation time:') + + val_loss = 0 + with torch.no_grad(): + for i, batch_data in enumerate(val_dataloader): + word_emb, pos_ohot, _, cap_lens, _, m_lens = batch_data + word_emb = word_emb.detach().to(self.device).float() + pos_ohot = pos_ohot.detach().to(self.device).float() + + pred_dis = self.estimator(word_emb, pos_ohot, cap_lens) + + gt_labels = m_lens // self.opt.unit_length + gt_labels = gt_labels.long().to(self.device) + loss = self.mul_cls_criterion(pred_dis, gt_labels) + + val_loss += loss.item() + + val_loss = val_loss / (len(val_dataloader) + 1) + print('Validation Loss: %.5f' % (val_loss)) + + if val_loss < min_val_loss: + self.save(pjoin(self.opt.model_dir, 'finest.tar'), epoch, it) + min_val_loss = val_loss + + +class TextMotionMatchTrainer(object): + + def __init__(self, args, text_encoder, motion_encoder, movement_encoder): + self.opt = args + self.text_encoder = text_encoder + self.motion_encoder = motion_encoder + self.movement_encoder = movement_encoder + self.device 
= args.device + + if args.is_train: + # self.motion_dis + self.logger = Logger(args.log_dir) + self.contrastive_loss = ContrastiveLoss(self.opt.negative_margin) + + def resume(self, model_dir): + checkpoints = torch.load(model_dir, map_location=self.device) + self.text_encoder.load_state_dict(checkpoints['text_encoder']) + self.motion_encoder.load_state_dict(checkpoints['motion_encoder']) + self.movement_encoder.load_state_dict(checkpoints['movement_encoder']) + + self.opt_text_encoder.load_state_dict(checkpoints['opt_text_encoder']) + self.opt_motion_encoder.load_state_dict(checkpoints['opt_motion_encoder']) + return checkpoints['epoch'], checkpoints['iter'] + + def save(self, model_dir, epoch, niter): + state = { + 'text_encoder': self.text_encoder.state_dict(), + 'motion_encoder': self.motion_encoder.state_dict(), + 'movement_encoder': self.movement_encoder.state_dict(), + + 'opt_text_encoder': self.opt_text_encoder.state_dict(), + 'opt_motion_encoder': self.opt_motion_encoder.state_dict(), + 'epoch': epoch, + 'iter': niter, + } + torch.save(state, model_dir) + + @staticmethod + def zero_grad(opt_list): + for opt in opt_list: + opt.zero_grad() + + @staticmethod + def clip_norm(network_list): + for network in network_list: + clip_grad_norm_(network.parameters(), 0.5) + + @staticmethod + def step(opt_list): + for opt in opt_list: + opt.step() + + def to(self, device): + self.text_encoder.to(device) + self.motion_encoder.to(device) + self.movement_encoder.to(device) + + def train_mode(self): + self.text_encoder.train() + self.motion_encoder.train() + self.movement_encoder.eval() + + def forward(self, batch_data): + word_emb, pos_ohot, caption, cap_lens, motions, m_lens, _ = batch_data + word_emb = word_emb.detach().to(self.device).float() + pos_ohot = pos_ohot.detach().to(self.device).float() + motions = motions.detach().to(self.device).float() + + # Sort the length of motions in descending order, (length of text has been sorted) + self.align_idx = np.argsort(m_lens.data.tolist())[::-1].copy() + # print(self.align_idx) + # print(m_lens[self.align_idx]) + motions = motions[self.align_idx] + m_lens = m_lens[self.align_idx] + + '''Movement Encoding''' + movements = self.movement_encoder(motions[..., :-4]).detach() + m_lens = m_lens // self.opt.unit_length + self.motion_embedding = self.motion_encoder(movements, m_lens) + + '''Text Encoding''' + # time0 = time.time() + # text_input = torch.cat([word_emb, pos_ohot], dim=-1) + self.text_embedding = self.text_encoder(word_emb, pos_ohot, cap_lens) + self.text_embedding = self.text_embedding.clone()[self.align_idx] + + + def backward(self): + + batch_size = self.text_embedding.shape[0] + '''Positive pairs''' + pos_labels = torch.zeros(batch_size).to(self.text_embedding.device) + self.loss_pos = self.contrastive_loss(self.text_embedding, self.motion_embedding, pos_labels) + + '''Negative Pairs, shifting index''' + neg_labels = torch.ones(batch_size).to(self.text_embedding.device) + shift = np.random.randint(0, batch_size-1) + new_idx = np.arange(shift, batch_size + shift) % batch_size + self.mis_motion_embedding = self.motion_embedding.clone()[new_idx] + self.loss_neg = self.contrastive_loss(self.text_embedding, self.mis_motion_embedding, neg_labels) + self.loss = self.loss_pos + self.loss_neg + + loss_logs = OrderedDict({}) + loss_logs['loss'] = self.loss.item() + loss_logs['loss_pos'] = self.loss_pos.item() + loss_logs['loss_neg'] = self.loss_neg.item() + return loss_logs + + + def update(self): + + self.zero_grad([self.opt_motion_encoder, 
self.opt_text_encoder]) + loss_logs = self.backward() + self.loss.backward() + self.clip_norm([self.text_encoder, self.motion_encoder]) + self.step([self.opt_text_encoder, self.opt_motion_encoder]) + + return loss_logs + + + def train(self, train_dataloader, val_dataloader): + self.to(self.device) + + self.opt_motion_encoder = optim.Adam(self.motion_encoder.parameters(), lr=self.opt.lr) + self.opt_text_encoder = optim.Adam(self.text_encoder.parameters(), lr=self.opt.lr) + + epoch = 0 + it = 0 + + if self.opt.is_continue: + model_dir = pjoin(self.opt.model_dir, 'latest.tar') + epoch, it = self.resume(model_dir) + + start_time = time.time() + total_iters = self.opt.max_epoch * len(train_dataloader) + print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_dataloader), len(val_dataloader))) + val_loss = 0 + logs = OrderedDict() + + min_val_loss = np.inf + while epoch < self.opt.max_epoch: + # time0 = time.time() + for i, batch_data in enumerate(train_dataloader): + self.train_mode() + + self.forward(batch_data) + # time3 = time.time() + log_dict = self.update() + for k, v in log_dict.items(): + if k not in logs: + logs[k] = v + else: + logs[k] += v + + + it += 1 + if it % self.opt.log_every == 0: + mean_loss = OrderedDict({'val_loss': val_loss}) + self.logger.scalar_summary('val_loss', val_loss, it) + + for tag, value in logs.items(): + self.logger.scalar_summary(tag, value / self.opt.log_every, it) + mean_loss[tag] = value / self.opt.log_every + logs = OrderedDict() + print_current_loss_decomp(start_time, it, total_iters, mean_loss, epoch, i) + + if it % self.opt.save_latest == 0: + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it) + + self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it) + + epoch += 1 + if epoch % self.opt.save_every_e == 0: + self.save(pjoin(self.opt.model_dir, 'E%04d.tar' % (epoch)), epoch, it) + + print('Validation time:') + + loss_pos_pair = 0 + loss_neg_pair = 0 + val_loss = 0 + with torch.no_grad(): + for i, batch_data in enumerate(val_dataloader): + self.forward(batch_data) + self.backward() + loss_pos_pair += self.loss_pos.item() + loss_neg_pair += self.loss_neg.item() + val_loss += self.loss.item() + + loss_pos_pair /= len(val_dataloader) + 1 + loss_neg_pair /= len(val_dataloader) + 1 + val_loss /= len(val_dataloader) + 1 + print('Validation Loss: %.5f Positive Loss: %.5f Negative Loss: %.5f' % + (val_loss, loss_pos_pair, loss_neg_pair)) + + if val_loss < min_val_loss: + self.save(pjoin(self.opt.model_dir, 'finest.tar'), epoch, it) + min_val_loss = val_loss + + if epoch % self.opt.eval_every_e == 0: + pos_dist = F.pairwise_distance(self.text_embedding, self.motion_embedding) + neg_dist = F.pairwise_distance(self.text_embedding, self.mis_motion_embedding) + + pos_str = ' '.join(['%.3f' % (pos_dist[i]) for i in range(pos_dist.shape[0])]) + neg_str = ' '.join(['%.3f' % (neg_dist[i]) for i in range(neg_dist.shape[0])]) + + save_path = pjoin(self.opt.eval_dir, 'E%03d.txt' % (epoch)) + with cs.open(save_path, 'w') as f: + f.write('Positive Pairs Distance\n') + f.write(pos_str + '\n') + f.write('Negative Pairs Distance\n') + f.write(neg_str + '\n') diff --git a/motion_diffusion_model/data_loaders/humanml/scripts/motion_process.py b/motion_diffusion_model/data_loaders/humanml/scripts/motion_process.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb0c40ceffb270c70415845aeeb4f260704d894 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/scripts/motion_process.py @@ -0,0 +1,669 @@ +from os.path import 
join as pjoin + +from data_loaders.humanml.common.skeleton import Skeleton +import numpy as np +import os +from data_loaders.humanml.common.quaternion import * +from data_loaders.humanml.utils.paramUtil import * + +import torch +from tqdm import tqdm +from data_loaders.humanml_utils import HML_JOINT_NAMES, HML_EE_JOINT_NAMES + +import random +from copy import copy, deepcopy + +# positions (batch, joint_num, 3) +def uniform_skeleton(positions, target_offset): + src_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu') + src_offset = src_skel.get_offsets_joints(torch.from_numpy(positions[0])) + src_offset = src_offset.numpy() + tgt_offset = target_offset.numpy() + # print(src_offset) + # print(tgt_offset) + '''Calculate Scale Ratio as the ratio of legs''' + src_leg_len = np.abs(src_offset[l_idx1]).max() + np.abs(src_offset[l_idx2]).max() + tgt_leg_len = np.abs(tgt_offset[l_idx1]).max() + np.abs(tgt_offset[l_idx2]).max() + + scale_rt = tgt_leg_len / src_leg_len + # print(scale_rt) + src_root_pos = positions[:, 0] + tgt_root_pos = src_root_pos * scale_rt + + '''Inverse Kinematics''' + quat_params = src_skel.inverse_kinematics_np(positions, face_joint_indx) + # print(quat_params.shape) + + '''Forward Kinematics''' + src_skel.set_offset(target_offset) + new_joints = src_skel.forward_kinematics_np(quat_params, tgt_root_pos) + return new_joints + + +def extract_features(positions, feet_thre, n_raw_offsets, kinematic_chain, face_joint_indx, fid_r, fid_l): + global_positions = positions.copy() + """ Get Foot Contacts """ + + def foot_detect(positions, thres): + velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0]) + + feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2 + feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2 + feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2 + # feet_l_h = positions[:-1,fid_l,1] + # feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float) + feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(np.float) + + feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2 + feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2 + feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2 + # feet_r_h = positions[:-1,fid_r,1] + # feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float) + feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor)).astype(np.float) + return feet_l, feet_r + + # + feet_l, feet_r = foot_detect(positions, feet_thre) + # feet_l, feet_r = foot_detect(positions, 0.002) + + '''Quaternion and Cartesian representation''' + r_rot = None + + def get_rifke(positions): + '''Local pose''' + positions[..., 0] -= positions[:, 0:1, 0] + positions[..., 2] -= positions[:, 0:1, 2] + '''All pose face Z+''' + positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions) + return positions + + def get_quaternion(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # (seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False) + + '''Fix Quaternion Discontinuity''' + quat_params = qfix(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root 
Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1])) + quat_params[1:, 0] = r_velocity + # (seq_len, joints_num, 4) + return quat_params, r_velocity, velocity, r_rot + + def get_cont6d_params(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # (seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True) + + '''Quaternion to continuous 6D''' + cont_6d_params = quaternion_to_cont6d_np(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1])) + # (seq_len, joints_num, 4) + return cont_6d_params, r_velocity, velocity, r_rot + + cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions) + positions = get_rifke(positions) + + # trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0) + # r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]]) + + # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*') + # plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r') + # plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g') + # plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y') + # plt.xlabel('x') + # plt.ylabel('z') + # plt.axis('equal') + # plt.show() + + '''Root height''' + root_y = positions[:, 0, 1:2] + + '''Root rotation and linear velocity''' + # (seq_len-1, 1) rotation velocity along y-axis + # (seq_len-1, 2) linear velovity on xz plane + r_velocity = np.arcsin(r_velocity[:, 2:3]) + l_velocity = velocity[:, [0, 2]] + # print(r_velocity.shape, l_velocity.shape, root_y.shape) + root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1) + + '''Get Joint Rotation Representation''' + # (seq_len, (joints_num-1) *6) quaternion for skeleton joints + rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1) + + '''Get Joint Rotation Invariant Position Represention''' + # (seq_len, (joints_num-1)*3) local joint position + ric_data = positions[:, 1:].reshape(len(positions), -1) + + '''Get Joint Velocity Representation''' + # (seq_len-1, joints_num*3) + local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1), + global_positions[1:] - global_positions[:-1]) + local_vel = local_vel.reshape(len(local_vel), -1) + + data = root_data + data = np.concatenate([data, ric_data[:-1]], axis=-1) + data = np.concatenate([data, rot_data[:-1]], axis=-1) + # print(dataset.shape, local_vel.shape) + data = np.concatenate([data, local_vel], axis=-1) + data = np.concatenate([data, feet_l, feet_r], axis=-1) + + return data + + +def process_file(positions, feet_thre): + # (seq_len, joints_num, 3) + # '''Down Sample''' + # positions = positions[::ds_num] + + '''Uniform Skeleton''' + positions = uniform_skeleton(positions, tgt_offsets) + + '''Put on Floor''' + floor_height = positions.min(axis=0).min(axis=0)[1] + positions[:, :, 1] -= floor_height + # print(floor_height) + + # plot_3d_motion("./positions_1.mp4", kinematic_chain, positions, 'title', fps=20) + + '''XZ at origin''' + root_pos_init = positions[0] + root_pose_init_xz = root_pos_init[0] * np.array([1, 0, 1]) + positions = positions - root_pose_init_xz + + # '''Move the 
first pose to origin ''' + # root_pos_init = positions[0] + # positions = positions - root_pos_init[0] + + '''All initially face Z+''' + r_hip, l_hip, sdr_r, sdr_l = face_joint_indx + across1 = root_pos_init[r_hip] - root_pos_init[l_hip] + across2 = root_pos_init[sdr_r] - root_pos_init[sdr_l] + across = across1 + across2 + across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis] + + # forward (3,), rotate around y-axis + forward_init = np.cross(np.array([[0, 1, 0]]), across, axis=-1) + # forward (3,) + forward_init = forward_init / np.sqrt((forward_init ** 2).sum(axis=-1))[..., np.newaxis] + + # print(forward_init) + + target = np.array([[0, 0, 1]]) + root_quat_init = qbetween_np(forward_init, target) + root_quat_init = np.ones(positions.shape[:-1] + (4,)) * root_quat_init + + positions_b = positions.copy() + + positions = qrot_np(root_quat_init, positions) + + # plot_3d_motion("./positions_2.mp4", kinematic_chain, positions, 'title', fps=20) + + '''New ground truth positions''' + global_positions = positions.copy() + + # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*') + # plt.plot(positions[:, 0, 0], positions[:, 0, 2], marker='o', color='r') + # plt.xlabel('x') + # plt.ylabel('z') + # plt.axis('equal') + # plt.show() + + """ Get Foot Contacts """ + + def foot_detect(positions, thres): + velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0]) + + feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2 + feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2 + feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2 + # feet_l_h = positions[:-1,fid_l,1] + # feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float) + feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(np.float) + + feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2 + feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2 + feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2 + # feet_r_h = positions[:-1,fid_r,1] + # feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float) + feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor)).astype(np.float) + return feet_l, feet_r + # + feet_l, feet_r = foot_detect(positions, feet_thre) + # feet_l, feet_r = foot_detect(positions, 0.002) + + '''Quaternion and Cartesian representation''' + r_rot = None + + def get_rifke(positions): + '''Local pose''' + positions[..., 0] -= positions[:, 0:1, 0] + positions[..., 2] -= positions[:, 0:1, 2] + '''All pose face Z+''' + positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions) + return positions + + def get_quaternion(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # (seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False) + + '''Fix Quaternion Discontinuity''' + quat_params = qfix(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1])) + quat_params[1:, 0] = r_velocity + # (seq_len, joints_num, 4) + return quat_params, r_velocity, velocity, r_rot + + def 
get_cont6d_params(positions): + skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # (seq_len, joints_num, 4) + quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True) + + '''Quaternion to continuous 6D''' + cont_6d_params = quaternion_to_cont6d_np(quat_params) + # (seq_len, 4) + r_rot = quat_params[:, 0].copy() + # print(r_rot[0]) + '''Root Linear Velocity''' + # (seq_len - 1, 3) + velocity = (positions[1:, 0] - positions[:-1, 0]).copy() + # print(r_rot.shape, velocity.shape) + velocity = qrot_np(r_rot[1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1])) + # (seq_len, joints_num, 4) + return cont_6d_params, r_velocity, velocity, r_rot + + cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions) + positions = get_rifke(positions) + + # trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0) + # r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]]) + + # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*') + # plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r') + # plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g') + # plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y') + # plt.xlabel('x') + # plt.ylabel('z') + # plt.axis('equal') + # plt.show() + + '''Root height''' + root_y = positions[:, 0, 1:2] + + '''Root rotation and linear velocity''' + # (seq_len-1, 1) rotation velocity along y-axis + # (seq_len-1, 2) linear velovity on xz plane + r_velocity = np.arcsin(r_velocity[:, 2:3]) + l_velocity = velocity[:, [0, 2]] + # print(r_velocity.shape, l_velocity.shape, root_y.shape) + root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1) + + '''Get Joint Rotation Representation''' + # (seq_len, (joints_num-1) *6) quaternion for skeleton joints + rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1) + + '''Get Joint Rotation Invariant Position Represention''' + # (seq_len, (joints_num-1)*3) local joint position + ric_data = positions[:, 1:].reshape(len(positions), -1) + + '''Get Joint Velocity Representation''' + # (seq_len-1, joints_num*3) + local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1), + global_positions[1:] - global_positions[:-1]) + local_vel = local_vel.reshape(len(local_vel), -1) + + data = root_data + data = np.concatenate([data, ric_data[:-1]], axis=-1) + data = np.concatenate([data, rot_data[:-1]], axis=-1) + # print(dataset.shape, local_vel.shape) + data = np.concatenate([data, local_vel], axis=-1) + data = np.concatenate([data, feet_l, feet_r], axis=-1) + + return data, global_positions, positions, l_velocity + + +# Recover global angle and positions for rotation dataset +# root_rot_velocity (B, seq_len, 1) +# root_linear_velocity (B, seq_len, 2) +# root_y (B, seq_len, 1) +# ric_data (B, seq_len, (joint_num - 1)*3) +# rot_data (B, seq_len, (joint_num - 1)*6) +# local_velocity (B, seq_len, joint_num*3) +# foot contact (B, seq_len, 4) +def recover_root_rot_pos(data): + rot_vel = data[..., 0] + r_rot_ang = torch.zeros_like(rot_vel).to(data.device) + '''Get Y-axis rotation from rotation velocity''' + r_rot_ang[..., 1:] = rot_vel[..., :-1] + r_rot_ang = torch.cumsum(r_rot_ang, dim=-1) + + r_rot_quat = torch.zeros(data.shape[:-1] + (4,)).to(data.device) + r_rot_quat[..., 0] = torch.cos(r_rot_ang) + r_rot_quat[..., 2] = torch.sin(r_rot_ang) + + r_pos = torch.zeros(data.shape[:-1] + 
(3,)).to(data.device) + r_pos[..., 1:, [0, 2]] = data[..., :-1, 1:3] + '''Add Y-axis rotation to root position''' + r_pos = qrot(qinv(r_rot_quat), r_pos) + + r_pos = torch.cumsum(r_pos, dim=-2) + + r_pos[..., 1] = data[..., 3] + return r_rot_quat, r_pos + + +def recover_root_rot_heading_ang(joints): + + '''Get Forward Direction''' + face_joint_idx = [2, 1, 17, 16] + # l_hip, r_hip, sdr_r, sdr_l = face_joint_idx + r_hip, l_hip, sdr_r, sdr_l = face_joint_idx # Note the bugfix + across1 = joints[:, r_hip] - joints[:, l_hip] + across2 = joints[:, sdr_r] - joints[:, sdr_l] + across = across1 + across2 + across = torch.nn.functional.normalize(across, dim=1) + # print(across1.shape, across2.shape) + + # forward (batch_size, 3) + forward = torch.cross(torch.tensor([[[0], [1], [0]]], dtype=across.dtype, device=across.device), across, axis=1) + forward = torch.nn.functional.normalize(forward, dim=1) + + return torch.atan2(forward[:, 0], forward[:, 2])[:, None] + +def recover_from_rot(data, joints_num, skeleton): + r_rot_quat, r_pos = recover_root_rot_pos(data) + + r_rot_cont6d = quaternion_to_cont6d(r_rot_quat) + + start_indx = 1 + 2 + 1 + (joints_num - 1) * 3 + end_indx = start_indx + (joints_num - 1) * 6 + cont6d_params = data[..., start_indx:end_indx] + # print(r_rot_cont6d.shape, cont6d_params.shape, r_pos.shape) + cont6d_params = torch.cat([r_rot_cont6d, cont6d_params], dim=-1) + cont6d_params = cont6d_params.view(-1, joints_num, 6) + + positions = skeleton.forward_kinematics_cont6d(cont6d_params, r_pos) + + return positions + +def recover_rot(data): + # dataset [bs, seqlen, 263/251] HumanML/KIT + joints_num = 22 if data.shape[-1] == 263 else 21 + r_rot_quat, r_pos = recover_root_rot_pos(data) + r_pos_pad = torch.cat([r_pos, torch.zeros_like(r_pos)], dim=-1).unsqueeze(-2) + r_rot_cont6d = quaternion_to_cont6d(r_rot_quat) + start_indx = 1 + 2 + 1 + (joints_num - 1) * 3 + end_indx = start_indx + (joints_num - 1) * 6 + cont6d_params = data[..., start_indx:end_indx] + cont6d_params = torch.cat([r_rot_cont6d, cont6d_params], dim=-1) + cont6d_params = cont6d_params.view(-1, joints_num, 6) + cont6d_params = torch.cat([cont6d_params, r_pos_pad], dim=-2) + return cont6d_params + + +def recover_from_ric(data, joints_num): + r_rot_quat, r_pos = recover_root_rot_pos(data) + positions = data[..., 4:(joints_num - 1) * 3 + 4] + positions = positions.view(positions.shape[:-1] + (-1, 3)) + + '''Add Y-axis rotation to local joints''' + positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions) + + '''Add root XZ to joints''' + positions[..., 0] += r_pos[..., 0:1] + positions[..., 2] += r_pos[..., 2:3] + + '''Concate root and joints''' + positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2) + + return positions +''' +For Text2Motion Dataset +''' +''' +if __name__ == "__main__": + example_id = "000021" + # Lower legs + l_idx1, l_idx2 = 5, 8 + # Right/Left foot + fid_r, fid_l = [8, 11], [7, 10] + # Face direction, r_hip, l_hip, sdr_r, sdr_l + face_joint_indx = [2, 1, 17, 16] + # l_hip, r_hip + r_hip, l_hip = 2, 1 + joints_num = 22 + # ds_num = 8 + data_dir = '../dataset/pose_data_raw/joints/' + save_dir1 = '../dataset/pose_data_raw/new_joints/' + save_dir2 = '../dataset/pose_data_raw/new_joint_vecs/' + + n_raw_offsets = torch.from_numpy(t2m_raw_offsets) + kinematic_chain = t2m_kinematic_chain + + # Get offsets of target skeleton + example_data = np.load(os.path.join(data_dir, example_id + '.npy')) + example_data = example_data.reshape(len(example_data), -1, 3) + 
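+    # Illustrative note (editor's sketch, not part of the original script): the per-frame
+    # feature vector built by process_file() and consumed by recover_from_ric() is laid out as
+    #   root_rot_velocity (1) + root_linear_velocity (2) + root_y (1)
+    #   + ric_data ((J-1)*3) + rot_data ((J-1)*6) + local_velocity (J*3) + foot_contacts (4)
+    # so for HumanML3D (J = 22): 1 + 2 + 1 + 63 + 126 + 66 + 4 = 263 dims per frame,
+    # and for KIT-ML  (J = 21): 1 + 2 + 1 + 60 + 120 + 63 + 4 = 251 dims per frame,
+    # matching dim_pose in get_opt.py elsewhere in this patch and the 263/251 check in
+    # recover_rot() above.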
example_data = torch.from_numpy(example_data) + tgt_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu') + # (joints_num, 3) + tgt_offsets = tgt_skel.get_offsets_joints(example_data[0]) + # print(tgt_offsets) + + source_list = os.listdir(data_dir) + frame_num = 0 + for source_file in tqdm(source_list): + source_data = np.load(os.path.join(data_dir, source_file))[:, :joints_num] + try: + dataset, ground_positions, positions, l_velocity = process_file(source_data, 0.002) + rec_ric_data = recover_from_ric(torch.from_numpy(dataset).unsqueeze(0).float(), joints_num) + np.save(pjoin(save_dir1, source_file), rec_ric_data.squeeze().numpy()) + np.save(pjoin(save_dir2, source_file), dataset) + frame_num += dataset.shape[0] + except Exception as e: + print(source_file) + print(e) + + print('Total clips: %d, Frames: %d, Duration: %fm' % + (len(source_list), frame_num, frame_num / 20 / 60)) +''' + +if __name__ == "__main__": + example_id = "03950_gt" + # Lower legs + l_idx1, l_idx2 = 17, 18 + # Right/Left foot + fid_r, fid_l = [14, 15], [19, 20] + # Face direction, r_hip, l_hip, sdr_r, sdr_l + face_joint_indx = [11, 16, 5, 8] + # l_hip, r_hip + r_hip, l_hip = 11, 16 + joints_num = 21 + # ds_num = 8 + data_dir = '../dataset/kit_mocap_dataset/joints/' + save_dir1 = '../dataset/kit_mocap_dataset/new_joints/' + save_dir2 = '../dataset/kit_mocap_dataset/new_joint_vecs/' + + n_raw_offsets = torch.from_numpy(kit_raw_offsets) + kinematic_chain = kit_kinematic_chain + + '''Get offsets of target skeleton''' + example_data = np.load(os.path.join(data_dir, example_id + '.npy')) + example_data = example_data.reshape(len(example_data), -1, 3) + example_data = torch.from_numpy(example_data) + tgt_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu') + # (joints_num, 3) + tgt_offsets = tgt_skel.get_offsets_joints(example_data[0]) + # print(tgt_offsets) + + source_list = os.listdir(data_dir) + frame_num = 0 + '''Read source dataset''' + for source_file in tqdm(source_list): + source_data = np.load(os.path.join(data_dir, source_file))[:, :joints_num] + try: + name = ''.join(source_file[:-7].split('_')) + '.npy' + data, ground_positions, positions, l_velocity = process_file(source_data, 0.05) + rec_ric_data = recover_from_ric(torch.from_numpy(data).unsqueeze(0).float(), joints_num) + if np.isnan(rec_ric_data.numpy()).any(): + print(source_file) + continue + np.save(pjoin(save_dir1, name), rec_ric_data.squeeze().numpy()) + np.save(pjoin(save_dir2, name), data) + frame_num += data.shape[0] + except Exception as e: + print(source_file) + print(e) + + print('Total clips: %d, Frames: %d, Duration: %fm' % + (len(source_list), frame_num, frame_num / 12.5 / 60)) + + +def traj_global2vel(traj_positions, traj_rot): + + # traj_positions [bs, 2 (x,z), seqlen] + # traj_positions [bs, 1 (z+, rad), seqlen] + # return first 3 hml enries [bs, 3, seqlen-1] + + # skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu") + # # (seq_len, joints_num, 4) + # quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True) + + bs, _, seqlen = traj_positions.shape + traj_positions = traj_positions.permute(0, 2, 1) + euler = torch.zeros([bs, 3, seqlen], dtype=traj_rot.dtype, device=traj_rot.device) + euler[:, 1:2] = traj_rot + euler = euler.permute(0, 2, 1).contiguous() + traj_rot_quat = euler2quat(euler, 'yxz', deg=False) + + # '''Quaternion to continuous 6D''' + # cont_6d_params = quaternion_to_cont6d_np(quat_params) + # # (seq_len, 4) + r_rot = traj_rot_quat.clone() + # print(r_rot[0]) + '''Root Linear Velocity''' + # 
(seq_len - 1, 3) + velocity = torch.zeros_like(euler[:, 1:, :]) + velocity[:, :, [0,2]] = (traj_positions[:, 1:, :] - traj_positions[:, :-1, :]).clone() + # print(r_rot.shape, velocity.shape) + velocity = qrot(r_rot[:, 1:], velocity) + '''Root Angular Velocity''' + # (seq_len - 1, 4) + r_velocity = qmul(r_rot[:, 1:].contiguous(), qinv(r_rot[:, :-1])) + # (seq_len, joints_num, 4) + + r_velocity = torch.arcsin(r_velocity[:, :, 2:3]) + l_velocity = velocity[:, :, [0, 2]] + # print(r_velocity.shape, l_velocity.shape, root_y.shape) + root_data = torch.cat([r_velocity, l_velocity], axis=-1).permute(0, 2, 1)[:, :, None] + + return root_data + +def get_target_location(motion, mean, std, lengths, joints_num, all_goal_joint_names, target_joint_names, is_heading): + assert (lengths == lengths[0]).all(), 'currently supporting only fixed length' + batch_size = motion.shape[0] + extended_goal_joint_names = all_goal_joint_names + ['traj', 'heading'] # todo: fix hardcoded indexing that assumes traj and heading are last + + # output tensor + target_loc = torch.zeros((batch_size, len(extended_goal_joint_names), 3, lengths[0]), dtype=motion.dtype, device=motion.device) # n_samples x (n_target_joints+1) x 3 x n_frames + + # hml to abs loc (all joints, not only the requested ones) + joints_loc = hml_to_abs_loc(motion, mean, std, joints_num) + pelvis_loc = HML_JOINT_NAMES.index('pelvis') + joints_loc = torch.concat([joints_loc, joints_loc[:, pelvis_loc:pelvis_loc+1]], dim=1) # concatenate the pelvis location to be used for traj + + # joint names to indices + HML_JOINT_NAMES_w_traj = HML_JOINT_NAMES + ['traj'] + for sample_idx in range(batch_size): + req_joint_idx_in = [HML_JOINT_NAMES_w_traj.index(name) for name in target_joint_names[sample_idx]] + req_joint_idx_out = [extended_goal_joint_names.index(name) for name in target_joint_names[sample_idx]] + + target_loc[sample_idx, req_joint_idx_out] = joints_loc[sample_idx, req_joint_idx_in] # assign joints loc to output tensor + + target_loc[:, -2, 1] = 0 # zero the y axis for the trajectory + + # last entry is the heading + heading = recover_root_rot_heading_ang(joints_loc) + target_loc[:, -1:, 0][is_heading] = heading[is_heading] + + return target_loc[..., -1] # return last frame only + + +def hml_to_abs_loc(motion, mean, std, joints_num): + # hml to abs loc (all joints, not only the requested ones) + unnormed_motion = (motion * std + mean).permute(0, 2, 3, 1).float() + joints_loc = recover_from_ric(unnormed_motion, joints_num) + joints_loc = joints_loc.view(-1, *joints_loc.shape[2:]).permute(0, 2, 3, 1) # n_samples x n_joints x 3 x n_frames + return joints_loc + + +def sample_goal(batch_size, device, force_joints=None): + if force_joints is None: + choices = np.array(['None', 'traj', 'pelvis'] + HML_EE_JOINT_NAMES) # todo: fix hardcoded 'pelvis' ('traj' is ok because it's our convention) + none_prob = 0.5 # todo: maybe convert to an argument + probabilities = torch.ones(len(choices)) * (1-none_prob) / (len(choices) -1) + probabilities[0] = none_prob # None's probability + assert probabilities.sum() - 1 < 1e-6, 'probabilities should sum to 1' + max_goal_joints_per_sample = 2 + # target_cond_idx = torch.randint(low=0, high=len(choices), size=(batch_size,max_goal_joints_per_sample)) + target_cond_idx = torch.multinomial(probabilities, max_goal_joints_per_sample * batch_size, replacement=True).view(batch_size, max_goal_joints_per_sample) + names = choices[target_cond_idx] + names = np.array([np.unique(name) for name in names]) + names = np.array([np.delete(name, 
np.argwhere(name=='None')) for name in names]) + is_heading = torch.bernoulli(torch.ones(batch_size, device=device) * .5).to(bool) + else: + options = get_allowed_joint_options(force_joints) + names = [copy(random.choice(options)) for _ in range(batch_size)] + is_heading = torch.zeros(batch_size, device=device).to(bool) + for i, n in enumerate(names): + if 'heading' in n: + is_heading[i] = True + del n[n.index('heading')] + return names, is_heading + +def get_allowed_joint_options(config_name): + if config_name == 'DIMP_FULL': + return [['pelvis', 'heading'], ['pelvis', 'head'], ['traj', 'heading'], ['right_wrist', 'heading'], ['left_wrist', 'heading'], ['right_foot', 'heading'], ['left_foot', 'heading']] + elif config_name == 'DIMP_FINAL': + return [['pelvis', 'heading'], ['traj', 'heading'], ['right_wrist', 'heading'], ['left_wrist', 'heading'], ['right_foot', 'heading'], ['left_foot', 'heading'], []] + elif config_name == 'DIMP_SLIM': + return [['pelvis', 'heading'], ['pelvis', 'head'], ['traj', 'heading'], ['left_wrist', 'heading'], ['left_foot', 'heading']] + elif config_name == 'DIMP_BENCH': + return [['pelvis', 'heading'], ['pelvis', 'head']] + elif config_name == 'PURE_T2M': + return [[]] + else: + return [config_name.split(',')] + diff --git a/motion_diffusion_model/data_loaders/humanml/utils/get_opt.py b/motion_diffusion_model/data_loaders/humanml/utils/get_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..c331b4dde8cc71c2ce33916945d75a43fc32308f --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/utils/get_opt.py @@ -0,0 +1,81 @@ +import os +from argparse import Namespace +import re +from os.path import join as pjoin +from data_loaders.humanml.utils.word_vectorizer import POS_enumerator + + +def is_float(numStr): + flag = False + numStr = str(numStr).strip().lstrip('-').lstrip('+') # 去除正数(+)、负数(-)符号 + try: + reg = re.compile(r'^[-+]?[0-9]+\.[0-9]+$') + res = reg.match(str(numStr)) + if res: + flag = True + except Exception as ex: + print("is_float() - error: " + str(ex)) + return flag + + +def is_number(numStr): + flag = False + numStr = str(numStr).strip().lstrip('-').lstrip('+') # 去除正数(+)、负数(-)符号 + if str(numStr).isdigit(): + flag = True + return flag + + +def get_opt(opt_path, device): + opt = Namespace() + opt_dict = vars(opt) + + skip = ('-------------- End ----------------', + '------------ Options -------------', + '\n') + print('Reading', opt_path) + with open(opt_path) as f: + for line in f: + if line.strip() not in skip: + # print(line.strip()) + key, value = line.strip().split(': ') + if value in ('True', 'False'): + opt_dict[key] = bool(value) + elif is_float(value): + opt_dict[key] = float(value) + elif is_number(value): + opt_dict[key] = int(value) + else: + opt_dict[key] = str(value) + + # print(opt) + opt_dict['which_epoch'] = 'latest' + opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) + opt.model_dir = pjoin(opt.save_root, 'model') + opt.meta_dir = pjoin(opt.save_root, 'meta') + + if opt.dataset_name == 't2m': + opt.data_root = './dataset/HumanML3D' + opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') + opt.text_dir = pjoin(opt.data_root, 'texts') + opt.joints_num = 22 + opt.dim_pose = 263 + opt.max_motion_length = 196 + elif opt.dataset_name == 'kit': + opt.data_root = './dataset/KIT-ML' + opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') + opt.text_dir = pjoin(opt.data_root, 'texts') + opt.joints_num = 21 + opt.dim_pose = 251 + opt.max_motion_length = 196 + else: + raise KeyError('Dataset 
not recognized') + + opt.dim_word = 300 + opt.num_classes = 200 // opt.unit_length + opt.dim_pos_ohot = len(POS_enumerator) + opt.is_train = False + opt.is_continue = False + opt.device = device + + return opt \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/utils/metrics.py b/motion_diffusion_model/data_loaders/humanml/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..4d6c68192ccecc3a1d739d2e4e53ffb12800efc1 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/utils/metrics.py @@ -0,0 +1,146 @@ +import numpy as np +from scipy import linalg + + +# (X - X_train)*(X - X_train) = -2X*X_train + X*X + X_train*X_train +def euclidean_distance_matrix(matrix1, matrix2): + """ + Params: + -- matrix1: N1 x D + -- matrix2: N2 x D + Returns: + -- dist: N1 x N2 + dist[i, j] == distance(matrix1[i], matrix2[j]) + """ + assert matrix1.shape[1] == matrix2.shape[1] + d1 = -2 * np.dot(matrix1, matrix2.T) # shape (num_test, num_train) + d2 = np.sum(np.square(matrix1), axis=1, keepdims=True) # shape (num_test, 1) + d3 = np.sum(np.square(matrix2), axis=1) # shape (num_train, ) + dists = np.sqrt(d1 + d2 + d3) # broadcasting + return dists + +def calculate_top_k(mat, top_k): + size = mat.shape[0] + gt_mat = np.expand_dims(np.arange(size), 1).repeat(size, 1) + bool_mat = (mat == gt_mat) + correct_vec = False + top_k_list = [] + for i in range(top_k): +# print(correct_vec, bool_mat[:, i]) + correct_vec = (correct_vec | bool_mat[:, i]) + # print(correct_vec) + top_k_list.append(correct_vec[:, None]) + top_k_mat = np.concatenate(top_k_list, axis=1) + return top_k_mat + + +def calculate_R_precision(embedding1, embedding2, top_k, sum_all=False): + dist_mat = euclidean_distance_matrix(embedding1, embedding2) + argmax = np.argsort(dist_mat, axis=1) + top_k_mat = calculate_top_k(argmax, top_k) + if sum_all: + return top_k_mat.sum(axis=0) + else: + return top_k_mat + + +def calculate_matching_score(embedding1, embedding2, sum_all=False): + assert len(embedding1.shape) == 2 + assert embedding1.shape[0] == embedding2.shape[0] + assert embedding1.shape[1] == embedding2.shape[1] + + dist = linalg.norm(embedding1 - embedding2, axis=1) + if sum_all: + return dist.sum(axis=0) + else: + return dist + + + +def calculate_activation_statistics(activations): + """ + Params: + -- activation: num_samples x dim_feat + Returns: + -- mu: dim_feat + -- sigma: dim_feat x dim_feat + """ + mu = np.mean(activations, axis=0) + cov = np.cov(activations, rowvar=False) + return mu, cov + + +def calculate_diversity(activation, diversity_times): + assert len(activation.shape) == 2 + assert activation.shape[0] > diversity_times + num_samples = activation.shape[0] + + first_indices = np.random.choice(num_samples, diversity_times, replace=False) + second_indices = np.random.choice(num_samples, diversity_times, replace=False) + dist = linalg.norm(activation[first_indices] - activation[second_indices], axis=1) + return dist.mean() + + +def calculate_multimodality(activation, multimodality_times): + assert len(activation.shape) == 3 + assert activation.shape[1] > multimodality_times + num_per_sent = activation.shape[1] + + first_dices = np.random.choice(num_per_sent, multimodality_times, replace=False) + second_dices = np.random.choice(num_per_sent, multimodality_times, replace=False) + dist = linalg.norm(activation[:, first_dices] - activation[:, second_dices], axis=2) + return dist.mean() + + +def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): + """Numpy 
implementation of the Frechet Distance. + The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) + and X_2 ~ N(mu_2, C_2) is + d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). + Stable version by Dougal J. Sutherland. + Params: + -- mu1 : Numpy array containing the activations of a layer of the + inception net (like returned by the function 'get_predictions') + for generated samples. + -- mu2 : The sample mean over activations, precalculated on an + representative dataset set. + -- sigma1: The covariance matrix over activations for generated samples. + -- sigma2: The covariance matrix over activations, precalculated on an + representative dataset set. + Returns: + -- : The Frechet Distance. + """ + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert mu1.shape == mu2.shape, \ + 'Training and test mean vectors have different lengths' + assert sigma1.shape == sigma2.shape, \ + 'Training and test covariances have different dimensions' + + diff = mu1 - mu2 + + # Product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + + np.trace(sigma2) - 2 * tr_covmean) \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/utils/paramUtil.py b/motion_diffusion_model/data_loaders/humanml/utils/paramUtil.py new file mode 100644 index 0000000000000000000000000000000000000000..a9f1708b85ca80a9051cb3675cec9b999a0d0e2b --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/utils/paramUtil.py @@ -0,0 +1,63 @@ +import numpy as np + +# Define a kinematic tree for the skeletal struture +kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]] + +kit_raw_offsets = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + [1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [-1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [0, 0, 1], + [0, 0, 1], + [-1, 0, 0], + [0, -1, 0], + [0, -1, 0], + [0, 0, 1], + [0, 0, 1] + ] +) + +t2m_raw_offsets = np.array([[0,0,0], + [1,0,0], + [-1,0,0], + [0,1,0], + [0,-1,0], + [0,-1,0], + [0,1,0], + [0,-1,0], + [0,-1,0], + [0,1,0], + [0,0,1], + [0,0,1], + [0,1,0], + [1,0,0], + [-1,0,0], + [0,0,1], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0], + [0,-1,0]]) + +t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]] +t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]] +t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]] + + +kit_tgt_skel_id = '03950' + +t2m_tgt_skel_id = '000021' + diff --git a/motion_diffusion_model/data_loaders/humanml/utils/plot_script.py 
b/motion_diffusion_model/data_loaders/humanml/utils/plot_script.py new file mode 100644 index 0000000000000000000000000000000000000000..91e4c02b487f4a5d17a80a11d199ee815ea7d53c --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/utils/plot_script.py @@ -0,0 +1,148 @@ +import math +import numpy as np +import matplotlib +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D +from matplotlib.animation import FuncAnimation, FFMpegFileWriter +from mpl_toolkits.mplot3d.art3d import Poly3DCollection +import mpl_toolkits.mplot3d.axes3d as p3 +# import cv2 +from textwrap import wrap +from moviepy.editor import VideoClip +from moviepy.video.io.bindings import mplfig_to_npimage + +def list_cut_average(ll, intervals): + if intervals == 1: + return ll + + bins = math.ceil(len(ll) * 1.0 / intervals) + ll_new = [] + for i in range(bins): + l_low = intervals * i + l_high = l_low + intervals + l_high = l_high if l_high < len(ll) else len(ll) + ll_new.append(np.mean(ll[l_low:l_high])) + return ll_new + + +def plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=(3, 3), fps=120, radius=3, + vis_mode='default', gt_frames=[]): + matplotlib.use('Agg') + + title_per_frame = type(title) == list + if title_per_frame: + assert len(title) == len(joints), 'Title length should match the number of frames' + title = ['\n'.join(wrap(s, 20)) for s in title] + else: + title = '\n'.join(wrap(title, 20)) + + def init(): + ax.set_xlim3d([-radius / 2, radius / 2]) + ax.set_ylim3d([0, radius]) + ax.set_zlim3d([-radius / 3., radius * 2 / 3.]) + # print(title) + # fig.suptitle(title, fontsize=10) # Using dynamic title instead + ax.grid(b=False) + + def plot_xzPlane(minx, maxx, miny, minz, maxz): + ## Plot a plane XZ + verts = [ + [minx, miny, minz], + [minx, miny, maxz], + [maxx, miny, maxz], + [maxx, miny, minz] + ] + xz_plane = Poly3DCollection([verts]) + xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5)) + ax.add_collection3d(xz_plane) + + # return ax + + # (seq_len, joints_num, 3) + data = joints.copy().reshape(len(joints), -1, 3) + + # preparation related to specific datasets + if dataset == 'kit': + data *= 0.003 # scale for visualization + elif dataset == 'humanml': + data *= 1.3 # scale for visualization + elif dataset in ['humanact12', 'uestc']: + data *= -1.5 # reverse axes, scale for visualization + + fig = plt.figure(figsize=figsize) + plt.tight_layout() + ax = p3.Axes3D(fig) + init() + MINS = data.min(axis=0).min(axis=0) + MAXS = data.max(axis=0).max(axis=0) + colors_blue = ["#4D84AA", "#5B9965", "#61CEB9", "#34C1E2", "#80B79A"] # GT color + colors_orange = ["#DD5A37", "#D69E00", "#B75A39", "#FF6D00", "#DDB50E"] # Generation color + colors = colors_orange + if vis_mode == 'upper_body': # lower body taken fixed to input motion + colors[0] = colors_blue[0] + colors[1] = colors_blue[1] + elif vis_mode == 'gt': + colors = colors_blue + + n_frames = data.shape[0] + # print(dataset.shape) + + height_offset = MINS[1] + data[:, :, 1] -= height_offset + trajec = data[:, 0, [0, 2]] # memorize original x,z pelvis values + + # locate x,z pelvis values of ** each frame ** at zero + data[..., 0] -= data[:, 0:1, 0] + data[..., 2] -= data[:, 0:1, 2] + + # print(trajec.shape) + + def update(index): + # sometimes index is equal to n_frames/fps due to floating point issues. 
in such case, we duplicate the last frame + index = min(n_frames-1, int(index*fps)) + ax.clear() + ax.view_init(elev=120, azim=-90) + ax.dist = 7.5 + + # Dynamic title + if title_per_frame: + _title = title[index] + else: + _title = title + _title += f' [{index}]' + fig.suptitle(_title, fontsize=10) + + plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1], + MAXS[2] - trajec[index, 1]) + + used_colors = colors_blue if index in gt_frames else colors + for i, (chain, color) in enumerate(zip(kinematic_tree, used_colors)): + if i < 5: + linewidth = 4.0 + else: + linewidth = 2.0 + ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth, + color=color) + # print(trajec[:index, 0].shape) + + plt.axis('off') + ax.set_axis_off() + ax.set_xticklabels([]) + ax.set_yticklabels([]) + ax.set_zticklabels([]) + + # Hide grid lines + ax.grid(False) + + # Hide axes ticks + ax.set_xticks([]) + ax.set_yticks([]) + ax.set_zticks([]) + + + return mplfig_to_npimage(fig) + + ani = VideoClip(update) + + plt.close() + return ani \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml/utils/utils.py b/motion_diffusion_model/data_loaders/humanml/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..34ae37ea45b27355546140306e1079d2031a838e --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/utils/utils.py @@ -0,0 +1,167 @@ +import os +import numpy as np +# import cv2 +from PIL import Image +from data_loaders.humanml.utils import paramUtil +import math +import time +import matplotlib.pyplot as plt +from scipy.ndimage import gaussian_filter + + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + +COLORS = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] + +MISSING_VALUE = -1 + +def save_image(image_numpy, image_path): + img_pil = Image.fromarray(image_numpy) + img_pil.save(image_path) + + +def save_logfile(log_loss, save_path): + with open(save_path, 'wt') as f: + for k, v in log_loss.items(): + w_line = k + for digit in v: + w_line += ' %.3f' % digit + f.write(w_line + '\n') + + +def print_current_loss(start_time, niter_state, losses, epoch=None, sub_epoch=None, + inner_iter=None, tf_ratio=None, sl_steps=None): + + def as_minutes(s): + m = math.floor(s / 60) + s -= m * 60 + return '%dm %ds' % (m, s) + + def time_since(since, percent): + now = time.time() + s = now - since + es = s / percent + rs = es - s + return '%s (- %s)' % (as_minutes(s), as_minutes(rs)) + + if epoch is not None: + print('epoch: %3d niter: %6d sub_epoch: %2d inner_iter: %4d' % (epoch, niter_state, sub_epoch, inner_iter), end=" ") + + # message = '%s niter: %d completed: %3d%%)' % (time_since(start_time, niter_state / total_niters), + # niter_state, niter_state / total_niters * 100) + now = time.time() + message = '%s'%(as_minutes(now - start_time)) + + for k, v in losses.items(): + message += ' %s: %.4f ' % (k, v) + message += ' sl_length:%2d tf_ratio:%.2f'%(sl_steps, tf_ratio) + print(message) + +def print_current_loss_decomp(start_time, niter_state, total_niters, losses, epoch=None, inner_iter=None): + + def as_minutes(s): + m = math.floor(s / 60) + s -= m * 60 + return '%dm %ds' % (m, s) + + def time_since(since, percent): + now = time.time() + s = now - since 
+ es = s / percent + rs = es - s + return '%s (- %s)' % (as_minutes(s), as_minutes(rs)) + + print('epoch: %03d inner_iter: %5d' % (epoch, inner_iter), end=" ") + # now = time.time() + message = '%s niter: %07d completed: %3d%%)'%(time_since(start_time, niter_state / total_niters), niter_state, niter_state / total_niters * 100) + for k, v in losses.items(): + message += ' %s: %.4f ' % (k, v) + print(message) + + +def compose_gif_img_list(img_list, fp_out, duration): + img, *imgs = [Image.fromarray(np.array(image)) for image in img_list] + img.save(fp=fp_out, format='GIF', append_images=imgs, optimize=False, + save_all=True, loop=0, duration=duration) + + +def save_images(visuals, image_path): + if not os.path.exists(image_path): + os.makedirs(image_path) + + for i, (label, img_numpy) in enumerate(visuals.items()): + img_name = '%d_%s.jpg' % (i, label) + save_path = os.path.join(image_path, img_name) + save_image(img_numpy, save_path) + + +def save_images_test(visuals, image_path, from_name, to_name): + if not os.path.exists(image_path): + os.makedirs(image_path) + + for i, (label, img_numpy) in enumerate(visuals.items()): + img_name = "%s_%s_%s" % (from_name, to_name, label) + save_path = os.path.join(image_path, img_name) + save_image(img_numpy, save_path) + + +def compose_and_save_img(img_list, save_dir, img_name, col=4, row=1, img_size=(256, 200)): + # print(col, row) + compose_img = compose_image(img_list, col, row, img_size) + if not os.path.exists(save_dir): + os.makedirs(save_dir) + img_path = os.path.join(save_dir, img_name) + # print(img_path) + compose_img.save(img_path) + + +def compose_image(img_list, col, row, img_size): + to_image = Image.new('RGB', (col * img_size[0], row * img_size[1])) + for y in range(0, row): + for x in range(0, col): + from_img = Image.fromarray(img_list[y * col + x]) + # print((x * img_size[0], y*img_size[1], + # (x + 1) * img_size[0], (y + 1) * img_size[1])) + paste_area = (x * img_size[0], y*img_size[1], + (x + 1) * img_size[0], (y + 1) * img_size[1]) + to_image.paste(from_img, paste_area) + # to_image[y*img_size[1]:(y + 1) * img_size[1], x * img_size[0] :(x + 1) * img_size[0]] = from_img + return to_image + + +def plot_loss_curve(losses, save_path, intervals=500): + plt.figure(figsize=(10, 5)) + plt.title("Loss During Training") + for key in losses.keys(): + plt.plot(list_cut_average(losses[key], intervals), label=key) + plt.xlabel("Iterations/" + str(intervals)) + plt.ylabel("Loss") + plt.legend() + plt.savefig(save_path) + plt.show() + + +def list_cut_average(ll, intervals): + if intervals == 1: + return ll + + bins = math.ceil(len(ll) * 1.0 / intervals) + ll_new = [] + for i in range(bins): + l_low = intervals * i + l_high = l_low + intervals + l_high = l_high if l_high < len(ll) else len(ll) + ll_new.append(np.mean(ll[l_low:l_high])) + return ll_new + + +def motion_temporal_filter(motion, sigma=1): + motion = motion.reshape(motion.shape[0], -1) + for i in range(motion.shape[1]): + motion[:, i] = gaussian_filter(motion[:, i], sigma=sigma, mode="nearest") + return motion.reshape(motion.shape[0], -1, 3) + diff --git a/motion_diffusion_model/data_loaders/humanml/utils/word_vectorizer.py b/motion_diffusion_model/data_loaders/humanml/utils/word_vectorizer.py new file mode 100644 index 0000000000000000000000000000000000000000..68c5956ff39f840d03c9a352e65291d26e2dfbd4 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml/utils/word_vectorizer.py @@ -0,0 +1,80 @@ +import numpy as np +import pickle +from os.path import join as pjoin + 
+POS_enumerator = { + 'VERB': 0, + 'NOUN': 1, + 'DET': 2, + 'ADP': 3, + 'NUM': 4, + 'AUX': 5, + 'PRON': 6, + 'ADJ': 7, + 'ADV': 8, + 'Loc_VIP': 9, + 'Body_VIP': 10, + 'Obj_VIP': 11, + 'Act_VIP': 12, + 'Desc_VIP': 13, + 'OTHER': 14, +} + +Loc_list = ('left', 'right', 'clockwise', 'counterclockwise', 'anticlockwise', 'forward', 'back', 'backward', + 'up', 'down', 'straight', 'curve') + +Body_list = ('arm', 'chin', 'foot', 'feet', 'face', 'hand', 'mouth', 'leg', 'waist', 'eye', 'knee', 'shoulder', 'thigh') + +Obj_List = ('stair', 'dumbbell', 'chair', 'window', 'floor', 'car', 'ball', 'handrail', 'baseball', 'basketball') + +Act_list = ('walk', 'run', 'swing', 'pick', 'bring', 'kick', 'put', 'squat', 'throw', 'hop', 'dance', 'jump', 'turn', + 'stumble', 'dance', 'stop', 'sit', 'lift', 'lower', 'raise', 'wash', 'stand', 'kneel', 'stroll', + 'rub', 'bend', 'balance', 'flap', 'jog', 'shuffle', 'lean', 'rotate', 'spin', 'spread', 'climb') + +Desc_list = ('slowly', 'carefully', 'fast', 'careful', 'slow', 'quickly', 'happy', 'angry', 'sad', 'happily', + 'angrily', 'sadly') + +VIP_dict = { + 'Loc_VIP': Loc_list, + 'Body_VIP': Body_list, + 'Obj_VIP': Obj_List, + 'Act_VIP': Act_list, + 'Desc_VIP': Desc_list, +} + + +class WordVectorizer(object): + def __init__(self, meta_root, prefix): + vectors = np.load(pjoin(meta_root, '%s_data.npy'%prefix)) + words = pickle.load(open(pjoin(meta_root, '%s_words.pkl'%prefix), 'rb')) + word2idx = pickle.load(open(pjoin(meta_root, '%s_idx.pkl'%prefix), 'rb')) + self.word2vec = {w: vectors[word2idx[w]] for w in words} + + def _get_pos_ohot(self, pos): + pos_vec = np.zeros(len(POS_enumerator)) + if pos in POS_enumerator: + pos_vec[POS_enumerator[pos]] = 1 + else: + pos_vec[POS_enumerator['OTHER']] = 1 + return pos_vec + + def __len__(self): + return len(self.word2vec) + + def __getitem__(self, item): + word, pos = item.split('/') + if word in self.word2vec: + word_vec = self.word2vec[word] + vip_pos = None + for key, values in VIP_dict.items(): + if word in values: + vip_pos = key + break + if vip_pos is not None: + pos_vec = self._get_pos_ohot(vip_pos) + else: + pos_vec = self._get_pos_ohot(pos) + else: + word_vec = self.word2vec['unk'] + pos_vec = self._get_pos_ohot('OTHER') + return word_vec, pos_vec \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/humanml_utils.py b/motion_diffusion_model/data_loaders/humanml_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..34d92297790e6ea19dc367dbd53965b58aef5e57 --- /dev/null +++ b/motion_diffusion_model/data_loaders/humanml_utils.py @@ -0,0 +1,60 @@ +import numpy as np + +HML_JOINT_NAMES = [ + 'pelvis', + 'left_hip', + 'right_hip', + 'spine1', + 'left_knee', + 'right_knee', + 'spine2', + 'left_ankle', + 'right_ankle', + 'spine3', + 'left_foot', + 'right_foot', + 'neck', + 'left_collar', + 'right_collar', + 'head', + 'left_shoulder', + 'right_shoulder', + 'left_elbow', + 'right_elbow', + 'left_wrist', + 'right_wrist', +] + +NUM_HML_JOINTS = len(HML_JOINT_NAMES) # 22 SMPLH body joints + +HML_EE_JOINT_NAMES = ['left_foot', 'right_foot', 'left_wrist', 'right_wrist', 'head'] +HML_LOWER_BODY_JOINTS = [HML_JOINT_NAMES.index(name) for name in ['pelvis', 'left_hip', 'right_hip', 'left_knee', 'right_knee', 'left_ankle', 'right_ankle', 'left_foot', 'right_foot',]] +SMPL_UPPER_BODY_JOINTS = [i for i in range(len(HML_JOINT_NAMES)) if i not in HML_LOWER_BODY_JOINTS] + + +# Recover global angle and positions for rotation data +# root_rot_velocity (B, seq_len, 1) +# root_linear_velocity (B, 
seq_len, 2) +# root_y (B, seq_len, 1) +# ric_data (B, seq_len, (joint_num - 1)*3) +# rot_data (B, seq_len, (joint_num - 1)*6) +# local_velocity (B, seq_len, joint_num*3) +# foot contact (B, seq_len, 4) +HML_ROOT_BINARY = np.array([True] + [False] * (NUM_HML_JOINTS-1)) +HML_ROOT_MASK = np.concatenate(([True]*(1+2+1), + HML_ROOT_BINARY[1:].repeat(3), + HML_ROOT_BINARY[1:].repeat(6), + HML_ROOT_BINARY.repeat(3), + [False] * 4)) +HML_ROOT_HORIZONTAL_MASK = np.concatenate(([True]*(1+2) + [False], + np.zeros_like(HML_ROOT_BINARY[1:].repeat(3)), + np.zeros_like(HML_ROOT_BINARY[1:].repeat(6)), + np.zeros_like(HML_ROOT_BINARY.repeat(3)), + [False] * 4)) +HML_LOWER_BODY_JOINTS_BINARY = np.array([i in HML_LOWER_BODY_JOINTS for i in range(NUM_HML_JOINTS)]) +HML_LOWER_BODY_MASK = np.concatenate(([True]*(1+2+1), + HML_LOWER_BODY_JOINTS_BINARY[1:].repeat(3), + HML_LOWER_BODY_JOINTS_BINARY[1:].repeat(6), + HML_LOWER_BODY_JOINTS_BINARY.repeat(3), + [True]*4)) +HML_UPPER_BODY_MASK = ~HML_LOWER_BODY_MASK \ No newline at end of file diff --git a/motion_diffusion_model/data_loaders/tensors.py b/motion_diffusion_model/data_loaders/tensors.py new file mode 100644 index 0000000000000000000000000000000000000000..7fbfbcec31faeecc8ef1463eeb8dd89982f633de --- /dev/null +++ b/motion_diffusion_model/data_loaders/tensors.py @@ -0,0 +1,94 @@ +import torch + +def lengths_to_mask(lengths, max_len): + # max_len = max(lengths) + mask = torch.arange(max_len, device=lengths.device).expand(len(lengths), max_len) < lengths.unsqueeze(1) + return mask + + +def collate_tensors(batch): + dims = batch[0].dim() + max_size = [max([b.size(i) for b in batch]) for i in range(dims)] + size = (len(batch),) + tuple(max_size) + canvas = batch[0].new_zeros(size=size) + for i, b in enumerate(batch): + sub_tensor = canvas[i] + for d in range(dims): + sub_tensor = sub_tensor.narrow(d, 0, b.size(d)) + sub_tensor.add_(b) + return canvas + + +def collate(batch): + notnone_batches = [b for b in batch if b is not None] + databatch = [b['inp'] for b in notnone_batches] + if 'lengths' in notnone_batches[0]: + lenbatch = [b['lengths'] for b in notnone_batches] + else: + lenbatch = [len(b['inp'][0][0]) for b in notnone_batches] + + + databatchTensor = collate_tensors(databatch) + lenbatchTensor = torch.as_tensor(lenbatch) + maskbatchTensor = lengths_to_mask(lenbatchTensor, databatchTensor.shape[-1]).unsqueeze(1).unsqueeze(1) # unqueeze for broadcasting + + motion = databatchTensor + cond = {'y': {'mask': maskbatchTensor, 'lengths': lenbatchTensor}} + + if 'text' in notnone_batches[0]: + textbatch = [b['text'] for b in notnone_batches] + cond['y'].update({'text': textbatch}) + + if 'tokens' in notnone_batches[0]: + textbatch = [b['tokens'] for b in notnone_batches] + cond['y'].update({'tokens': textbatch}) + + if 'action' in notnone_batches[0]: + actionbatch = [b['action'] for b in notnone_batches] + cond['y'].update({'action': torch.as_tensor(actionbatch).unsqueeze(1)}) + + # collate action textual names + if 'action_text' in notnone_batches[0]: + action_text = [b['action_text']for b in notnone_batches] + cond['y'].update({'action_text': action_text}) + + if 'prefix' in notnone_batches[0]: + cond['y'].update({'prefix': collate_tensors([b['prefix'] for b in notnone_batches])}) + + if 'orig_lengths' in notnone_batches[0]: + cond['y'].update({'orig_lengths': torch.as_tensor([b['orig_lengths'] for b in notnone_batches])}) + + if 'key' in notnone_batches[0]: + cond['y'].update({'db_key': [b['key'] for b in notnone_batches]}) + + return motion, cond + +# an 
adapter to our collate func +def t2m_collate(batch, target_batch_size): + repeat_factor = -(-target_batch_size // len(batch)) # Ceiling division + repeated_batch = batch * repeat_factor + full_batch = repeated_batch[:target_batch_size] # Truncate to the target batch size + # batch.sort(key=lambda x: x[3], reverse=True) + adapted_batch = [{ + 'inp': torch.tensor(b[4].T).float().unsqueeze(1), # [seqlen, J] -> [J, 1, seqlen] + 'text': b[2], #b[0]['caption'] + 'tokens': b[6], + 'lengths': b[5], + 'key': b[7] if len(b) > 7 else None, + } for b in full_batch] + return collate(adapted_batch) + + +def t2m_prefix_collate(batch, pred_len): + # batch.sort(key=lambda x: x[3], reverse=True) + adapted_batch = [{ + 'inp': torch.tensor(b[4].T).float().unsqueeze(1)[..., -pred_len:], # [seqlen, J] -> [J, 1, seqlen] + 'prefix': torch.tensor(b[4].T).float().unsqueeze(1)[..., :-pred_len], + 'text': b[2], #b[0]['caption'] + 'tokens': b[6], + 'lengths': pred_len, # b[5], + 'orig_lengths': b[5][0], # For evaluation + 'key': b[7] if len(b) > 7 else None, + } for b in batch] + return collate(adapted_batch) + diff --git a/motion_diffusion_model/diffusion/fp16_util.py b/motion_diffusion_model/diffusion/fp16_util.py new file mode 100644 index 0000000000000000000000000000000000000000..1ccb93e4843b6257c3151b763356ef501f1acec8 --- /dev/null +++ b/motion_diffusion_model/diffusion/fp16_util.py @@ -0,0 +1,236 @@ +""" +Helpers to train with 16-bit precision. +""" + +import numpy as np +import torch as th +import torch.nn as nn +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + +from diffusion import logger + +INITIAL_LOG_LOSS_SCALE = 20.0 + + +def convert_module_to_f16(l): + """ + Convert primitive modules to float16. + """ + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + +def convert_module_to_f32(l): + """ + Convert primitive modules to float32, undoing convert_module_to_f16(). + """ + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): + l.weight.data = l.weight.data.float() + if l.bias is not None: + l.bias.data = l.bias.data.float() + + +def make_master_params(param_groups_and_shapes): + """ + Copy model parameters into a (differently-shaped) list of full-precision + parameters. + """ + master_params = [] + for param_group, shape in param_groups_and_shapes: + master_param = nn.Parameter( + _flatten_dense_tensors( + [param.detach().float() for (_, param) in param_group] + ).view(shape) + ) + master_param.requires_grad = True + master_params.append(master_param) + return master_params + + +def model_grads_to_master_grads(param_groups_and_shapes, master_params): + """ + Copy the gradients from the model parameters into the master parameters + from make_master_params(). + """ + for master_param, (param_group, shape) in zip( + master_params, param_groups_and_shapes + ): + master_param.grad = _flatten_dense_tensors( + [param_grad_or_zeros(param) for (_, param) in param_group] + ).view(shape) + + +def master_params_to_model_params(param_groups_and_shapes, master_params): + """ + Copy the master parameter data back into the model parameters. + """ + # Without copying to a list, if a generator is passed, this will + # silently not copy any parameters. 
+ for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes): + for (_, param), unflat_master_param in zip( + param_group, unflatten_master_params(param_group, master_param.view(-1)) + ): + param.detach().copy_(unflat_master_param) + + +def unflatten_master_params(param_group, master_param): + return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group]) + + +def get_param_groups_and_shapes(named_model_params): + named_model_params = list(named_model_params) + scalar_vector_named_params = ( + [(n, p) for (n, p) in named_model_params if p.ndim <= 1], + (-1), + ) + matrix_named_params = ( + [(n, p) for (n, p) in named_model_params if p.ndim > 1], + (1, -1), + ) + return [scalar_vector_named_params, matrix_named_params] + + +def master_params_to_state_dict( + model, param_groups_and_shapes, master_params, use_fp16 +): + if use_fp16: + state_dict = model.state_dict() + for master_param, (param_group, _) in zip( + master_params, param_groups_and_shapes + ): + for (name, _), unflat_master_param in zip( + param_group, unflatten_master_params(param_group, master_param.view(-1)) + ): + assert name in state_dict + state_dict[name] = unflat_master_param + else: + state_dict = model.state_dict() + for i, (name, _value) in enumerate(model.named_parameters()): + assert name in state_dict + state_dict[name] = master_params[i] + return state_dict + + +def state_dict_to_master_params(model, state_dict, use_fp16): + if use_fp16: + named_model_params = [ + (name, state_dict[name]) for name, _ in model.named_parameters() + ] + param_groups_and_shapes = get_param_groups_and_shapes(named_model_params) + master_params = make_master_params(param_groups_and_shapes) + else: + master_params = [state_dict[name] for name, _ in model.named_parameters()] + return master_params + + +def zero_master_grads(master_params): + for param in master_params: + param.grad = None + + +def zero_grad(model_params): + for param in model_params: + # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group + if param.grad is not None: + param.grad.detach_() + param.grad.zero_() + + +def param_grad_or_zeros(param): + if param.grad is not None: + return param.grad.data.detach() + else: + return th.zeros_like(param) + + +class MixedPrecisionTrainer: + def __init__( + self, + *, + model, + use_fp16=False, + fp16_scale_growth=1e-3, + initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE, + ): + self.model = model + self.use_fp16 = use_fp16 + self.fp16_scale_growth = fp16_scale_growth + + self.model_params = list(self.model.parameters()) + self.master_params = self.model_params + self.param_groups_and_shapes = None + self.lg_loss_scale = initial_lg_loss_scale + + if self.use_fp16: + self.param_groups_and_shapes = get_param_groups_and_shapes( + self.model.named_parameters() + ) + self.master_params = make_master_params(self.param_groups_and_shapes) + self.model.convert_to_fp16() + + def zero_grad(self): + zero_grad(self.model_params) + + def backward(self, loss: th.Tensor): + if self.use_fp16: + loss_scale = 2 ** self.lg_loss_scale + (loss * loss_scale).backward() + else: + loss.backward() + + def optimize(self, opt: th.optim.Optimizer): + if self.use_fp16: + return self._optimize_fp16(opt) + else: + return self._optimize_normal(opt) + + def _optimize_fp16(self, opt: th.optim.Optimizer): + logger.logkv_mean("lg_loss_scale", self.lg_loss_scale) + model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params) + grad_norm, param_norm = 
self._compute_norms(grad_scale=2 ** self.lg_loss_scale) + if check_overflow(grad_norm): + self.lg_loss_scale -= 1 + logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}") + zero_master_grads(self.master_params) + return False + + logger.logkv_mean("grad_norm", grad_norm) + logger.logkv_mean("param_norm", param_norm) + + self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale)) + opt.step() + zero_master_grads(self.master_params) + master_params_to_model_params(self.param_groups_and_shapes, self.master_params) + self.lg_loss_scale += self.fp16_scale_growth + return True + + def _optimize_normal(self, opt: th.optim.Optimizer): + grad_norm, param_norm = self._compute_norms() + logger.logkv_mean("grad_norm", grad_norm) + logger.logkv_mean("param_norm", param_norm) + opt.step() + return True + + def _compute_norms(self, grad_scale=1.0): + grad_norm = 0.0 + param_norm = 0.0 + for p in self.master_params: + with th.no_grad(): + param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2 + if p.grad is not None: + grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2 + return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm) + + def master_params_to_state_dict(self, master_params): + return master_params_to_state_dict( + self.model, self.param_groups_and_shapes, master_params, self.use_fp16 + ) + + def state_dict_to_master_params(self, state_dict): + return state_dict_to_master_params(self.model, state_dict, self.use_fp16) + + +def check_overflow(value): + return (value == float("inf")) or (value == -float("inf")) or (value != value) diff --git a/motion_diffusion_model/diffusion/gaussian_diffusion.py b/motion_diffusion_model/diffusion/gaussian_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..93d929a86fd12de6db22d4d51296cd9636cf2ed8 --- /dev/null +++ b/motion_diffusion_model/diffusion/gaussian_diffusion.py @@ -0,0 +1,1615 @@ +# This code is based on https://github.com/openai/guided-diffusion +""" +This code started out as a PyTorch port of Ho et al's diffusion models: +https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py + +Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules. +""" + +import enum +import math + +import numpy as np +import torch +import torch as th +from copy import deepcopy +from diffusion.nn import mean_flat, sum_flat +from diffusion.losses import normal_kl, discretized_gaussian_log_likelihood +from data_loaders.humanml.scripts import motion_process +from utils.loss_util import masked_l2, masked_goal_l2 +from data_loaders.humanml.scripts.motion_process import get_target_location + +def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.): + """ + Get a pre-defined beta schedule for the given name. + + The beta schedule library consists of beta schedules which remain similar + in the limit of num_diffusion_timesteps. + Beta schedules may be added, but should not be removed or changed once + they are committed to maintain backwards compatibility. + """ + if schedule_name == "linear": + # Linear schedule from Ho et al, extended to work for any number of + # diffusion steps. 
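+ # For intuition (these numbers follow directly from the lines below): with the default + # scale_betas=1. and num_diffusion_timesteps=1000, scale is 1.0, so the betas run linearly + # from 1e-4 to 0.02 as in Ho et al.; with 50 timesteps, scale becomes 20.0 and the same code + # yields beta_start=2e-3 and beta_end=0.4, keeping the total amount of noise comparable.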
+ scale = scale_betas * 1000 / num_diffusion_timesteps + beta_start = scale * 0.0001 + beta_end = scale * 0.02 + return np.linspace( + beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64 + ) + elif schedule_name == "cosine": + return betas_for_alpha_bar( + num_diffusion_timesteps, + lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, + ) + else: + raise NotImplementedError(f"unknown beta schedule: {schedule_name}") + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. + """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +class ModelMeanType(enum.Enum): + """ + Which type of output the model predicts. + """ + + PREVIOUS_X = enum.auto() # the model predicts x_{t-1} + START_X = enum.auto() # the model predicts x_0 + EPSILON = enum.auto() # the model predicts epsilon + + +class ModelVarType(enum.Enum): + """ + What is used as the model's output variance. + + The LEARNED_RANGE option has been added to allow the model to predict + values between FIXED_SMALL and FIXED_LARGE, making its job easier. + """ + + LEARNED = enum.auto() + FIXED_SMALL = enum.auto() + FIXED_LARGE = enum.auto() + LEARNED_RANGE = enum.auto() + + +class LossType(enum.Enum): + MSE = enum.auto() # use raw MSE loss (and KL when learning variances) + RESCALED_MSE = ( + enum.auto() + ) # use raw MSE loss (with RESCALED_KL when learning variances) + KL = enum.auto() # use the variational lower-bound + RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB + + def is_vb(self): + return self == LossType.KL or self == LossType.RESCALED_KL + + +class GaussianDiffusion: + """ + Utilities for training and sampling diffusion models. + + Ported directly from here, and then adapted over time to further experimentation. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42 + + :param betas: a 1-D numpy array of betas for each diffusion timestep, + starting at T and going to 1. + :param model_mean_type: a ModelMeanType determining what the model outputs. + :param model_var_type: a ModelVarType determining how variance is output. + :param loss_type: a LossType determining the loss function to use. + :param rescale_timesteps: if True, pass floating point timesteps into the + model so that they are always scaled like in the + original paper (0 to 1000). 
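+ In this repository the constructor additionally accepts lambda_* weights + (e.g. lambda_rcxyz, lambda_vel, lambda_fc, lambda_target_loc) that switch on the + geometric losses; as asserted in __init__ below, these are only supported together + with the MSE loss type.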
+ """ + + def __init__( + self, + *, + betas, + model_mean_type, + model_var_type, + loss_type, + rescale_timesteps=False, + lambda_rcxyz=0., + lambda_vel=0., + lambda_pose=1., + lambda_orient=1., + lambda_loc=1., + data_rep='rot6d', + lambda_root_vel=0., + lambda_vel_rcxyz=0., + lambda_fc=0., + lambda_target_loc=0., + **kargs, + ): + self.model_mean_type = model_mean_type + self.model_var_type = model_var_type + self.loss_type = loss_type + self.rescale_timesteps = rescale_timesteps + self.data_rep = data_rep + + if data_rep != 'rot_vel' and lambda_pose != 1.: + raise ValueError('lambda_pose is relevant only when training on velocities!') + self.lambda_pose = lambda_pose + self.lambda_orient = lambda_orient + self.lambda_loc = lambda_loc + + self.lambda_rcxyz = lambda_rcxyz + self.lambda_target_loc = lambda_target_loc + self.lambda_vel = lambda_vel + self.lambda_root_vel = lambda_root_vel + self.lambda_vel_rcxyz = lambda_vel_rcxyz + self.lambda_fc = lambda_fc + + if self.lambda_rcxyz > 0. or self.lambda_vel > 0. or self.lambda_root_vel > 0. or \ + self.lambda_vel_rcxyz > 0. or self.lambda_fc > 0. or self.lambda_target_loc > 0.: + assert self.loss_type == LossType.MSE, 'Geometric losses are supported by MSE loss type only!' + + # Use float64 for accuracy. + betas = np.array(betas, dtype=np.float64) + self.betas = betas + assert len(betas.shape) == 1, "betas must be 1-D" + assert (betas > 0).all() and (betas <= 1).all() + + self.num_timesteps = int(betas.shape[0]) + + alphas = 1.0 - betas + self.alphas_cumprod = np.cumprod(alphas, axis=0) + self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) + self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) + assert self.alphas_cumprod_prev.shape == (self.num_timesteps,) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod) + self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod) + self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod) + self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + self.posterior_variance = ( + betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) + ) + # log calculation clipped because the posterior variance is 0 at the + # beginning of the diffusion chain. + self.posterior_log_variance_clipped = np.log( + np.append(self.posterior_variance[1], self.posterior_variance[1:]) + ) + self.posterior_mean_coef1 = ( + betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) + ) + self.posterior_mean_coef2 = ( + (1.0 - self.alphas_cumprod_prev) + * np.sqrt(alphas) + / (1.0 - self.alphas_cumprod) + ) + + # self.l2_loss = lambda a, b: (a - b) ** 2 # th.nn.MSELoss(reduction='none') # must be None for handling mask later on. + self.masked_l2 = masked_l2 + + + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
+ """ + mean = ( + _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + ) + variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = _extract_into_tensor( + self.log_one_minus_alphas_cumprod, t, x_start.shape + ) + return mean, variance, log_variance + + def q_sample(self, x_start, t, noise=None): + """ + Diffuse the dataset for a given number of diffusion steps. + + In other words, sample from q(x_t | x_0). + + :param x_start: the initial dataset batch. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :param noise: if specified, the split-out normal noise. + :return: A noisy version of x_start. + """ + if noise is None: + noise = th.randn_like(x_start) + assert noise.shape == x_start.shape + return ( + _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) + * noise + ) + + def q_posterior_mean_variance(self, x_start, x_t, t): + """ + Compute the mean and variance of the diffusion posterior: + + q(x_{t-1} | x_t, x_0) + + """ + assert x_start.shape == x_t.shape + posterior_mean = ( + _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = _extract_into_tensor( + self.posterior_log_variance_clipped, t, x_t.shape + ) + assert ( + posterior_mean.shape[0] + == posterior_variance.shape[0] + == posterior_log_variance_clipped.shape[0] + == x_start.shape[0] + ) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance( + self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None + ): + """ + Apply the model to get p(x_{t-1} | x_t), as well as a prediction of + the initial x, x_0. + + :param model: the model, which takes a signal and a batch of timesteps + as input. + :param x: the [N x C x ...] tensor at time t. + :param t: a 1-D Tensor of timesteps. + :param clip_denoised: if True, clip the denoised signal into [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. Applies before + clip_denoised. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict with the following keys: + - 'mean': the model mean output. + - 'variance': the model variance output. + - 'log_variance': the log of 'variance'. + - 'pred_xstart': the prediction for x_0. + """ + if model_kwargs is None: + model_kwargs = {} + + B, C = x.shape[:2] + assert t.shape == (B,) + model_output = model(x, self._scale_timesteps(t), **model_kwargs) + + if 'inpainting_mask' in model_kwargs['y'].keys() and 'inpainted_motion' in model_kwargs['y'].keys(): + inpainting_mask, inpainted_motion = model_kwargs['y']['inpainting_mask'], model_kwargs['y']['inpainted_motion'] + assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!' 
+ assert model_output.shape == inpainting_mask.shape == inpainted_motion.shape + model_output = (model_output * ~inpainting_mask) + (inpainted_motion * inpainting_mask) + # print('model_output', model_output.shape, model_output) + # print('inpainting_mask', inpainting_mask.shape, inpainting_mask[0,0,0,:]) + # print('inpainted_motion', inpainted_motion.shape, inpainted_motion) + + if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]: + assert model_output.shape == (B, C * 2, *x.shape[2:]) + model_output, model_var_values = th.split(model_output, C, dim=1) + if self.model_var_type == ModelVarType.LEARNED: + model_log_variance = model_var_values + model_variance = th.exp(model_log_variance) + else: + min_log = _extract_into_tensor( + self.posterior_log_variance_clipped, t, x.shape + ) + max_log = _extract_into_tensor(np.log(self.betas), t, x.shape) + # The model_var_values is [-1, 1] for [min_var, max_var]. + frac = (model_var_values + 1) / 2 + model_log_variance = frac * max_log + (1 - frac) * min_log + model_variance = th.exp(model_log_variance) + else: + model_variance, model_log_variance = { + # for fixedlarge, we set the initial (log-)variance like so + # to get a better decoder log likelihood. + ModelVarType.FIXED_LARGE: ( + np.append(self.posterior_variance[1], self.betas[1:]), + np.log(np.append(self.posterior_variance[1], self.betas[1:])), + ), + ModelVarType.FIXED_SMALL: ( + self.posterior_variance, + self.posterior_log_variance_clipped, + ), + }[self.model_var_type] + # print('model_variance', model_variance) + # print('model_log_variance',model_log_variance) + # print('self.posterior_variance', self.posterior_variance) + # print('self.posterior_log_variance_clipped', self.posterior_log_variance_clipped) + # print('self.model_var_type', self.model_var_type) + + + model_variance = _extract_into_tensor(model_variance, t, x.shape) + model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape) + + def process_xstart(x): + if denoised_fn is not None: + x = denoised_fn(x) + if clip_denoised: + # print('clip_denoised', clip_denoised) + return x.clamp(-1, 1) + return x + + if self.model_mean_type == ModelMeanType.PREVIOUS_X: + pred_xstart = process_xstart( + self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output) + ) + model_mean = model_output + elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US! 
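+ # For START_X the network predicts x_0 directly; for EPSILON the predicted noise is + # first converted to an x_0 estimate, and either way the mean of p(x_{t-1} | x_t) is + # then taken from the posterior q(x_{t-1} | x_t, x_0).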
+ if self.model_mean_type == ModelMeanType.START_X: + pred_xstart = process_xstart(model_output) + else: + pred_xstart = process_xstart( + self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) + ) + model_mean, _, _ = self.q_posterior_mean_variance( + x_start=pred_xstart, x_t=x, t=t + ) + else: + raise NotImplementedError(self.model_mean_type) + + assert ( + model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape + ) + return { + "mean": model_mean, + "variance": model_variance, + "log_variance": model_log_variance, + "pred_xstart": pred_xstart, + } + + def _predict_xstart_from_eps(self, x_t, t, eps): + assert x_t.shape == eps.shape + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps + ) + + def _predict_xstart_from_xprev(self, x_t, t, xprev): + assert x_t.shape == xprev.shape + return ( # (xprev - coef2*x_t) / coef1 + _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev + - _extract_into_tensor( + self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape + ) + * x_t + ) + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - pred_xstart + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _scale_timesteps(self, t): + if self.rescale_timesteps: + return t.float() * (1000.0 / self.num_timesteps) + return t + + def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute the mean for the previous step, given a function cond_fn that + computes the gradient of a conditional log probability with respect to + x. In particular, cond_fn computes grad(log(p(y|x))), and we want to + condition on y. + + This uses the conditioning strategy from Sohl-Dickstein et al. (2015). + """ + gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs) + new_mean = ( + p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() + ) + return new_mean + + def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute the mean for the previous step, given a function cond_fn that + computes the gradient of a conditional log probability with respect to + x. In particular, cond_fn computes grad(log(p(y|x))), and we want to + condition on y. + + This uses the conditioning strategy from Sohl-Dickstein et al. (2015). + """ + gradient = cond_fn(x, t, p_mean_var, **model_kwargs) + new_mean = ( + p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() + ) + return new_mean + + def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute what the p_mean_variance output would have been, should the + model's score function be conditioned by cond_fn. + + See condition_mean() for details on cond_fn. + + Unlike condition_mean(), this instead uses the conditioning strategy + from Song et al (2020). 
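+ Concretely, the predicted noise is shifted by the guidance gradient, + eps <- eps - sqrt(1 - alpha_bar_t) * cond_fn(x, t), and pred_xstart and the posterior + mean are then re-derived from the shifted eps.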
+ """ + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + + eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) + eps = eps - (1 - alpha_bar).sqrt() * cond_fn( + x, self._scale_timesteps(t), **model_kwargs + ) + + out = p_mean_var.copy() + out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) + out["mean"], _, _ = self.q_posterior_mean_variance( + x_start=out["pred_xstart"], x_t=x, t=t + ) + return out + + def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute what the p_mean_variance output would have been, should the + model's score function be conditioned by cond_fn. + + See condition_mean() for details on cond_fn. + + Unlike condition_mean(), this instead uses the conditioning strategy + from Song et al (2020). + """ + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + + eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) + eps = eps - (1 - alpha_bar).sqrt() * cond_fn( + x, t, p_mean_var, **model_kwargs + ) + + out = p_mean_var.copy() + out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) + out["mean"], _, _ = self.q_posterior_mean_variance( + x_start=out["pred_xstart"], x_t=x, t=t + ) + return out + + def p_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + const_noise=False, + ): + """ + Sample x_{t-1} from the model at the given timestep. + + :param model: the model to sample from. + :param x: the current tensor at x_{t-1}. + :param t: the value of t, starting at 0 for the first diffusion step. + :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict containing the following keys: + - 'sample': a random sample from the model. + - 'pred_xstart': a prediction of x_0. + """ + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + noise = th.randn_like(x) + # print('const_noise', const_noise) + if const_noise: + noise = noise[[0]].repeat(x.shape[0], 1, 1, 1) + + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + if cond_fn is not None: + out["mean"] = self.condition_mean( + cond_fn, out, x, t, model_kwargs=model_kwargs + ) + # print('mean', out["mean"].shape, out["mean"]) + # print('log_variance', out["log_variance"].shape, out["log_variance"]) + # print('nonzero_mask', nonzero_mask.shape, nonzero_mask) + sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + def p_sample_with_grad( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + ): + """ + Sample x_{t-1} from the model at the given timestep. + + :param model: the model to sample from. + :param x: the current tensor at x_{t-1}. + :param t: the value of t, starting at 0 for the first diffusion step. + :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. 
+ :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict containing the following keys: + - 'sample': a random sample from the model. + - 'pred_xstart': a prediction of x_0. + """ + with th.enable_grad(): + x = x.detach().requires_grad_() + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + noise = th.randn_like(x) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + if cond_fn is not None: + out["mean"] = self.condition_mean_with_grad( + cond_fn, out, x, t, model_kwargs=model_kwargs + ) + sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"].detach()} + + def p_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + skip_timesteps=0, + init_image=None, + randomize_class=False, + cond_fn_with_grad=False, + dump_steps=None, + const_noise=False, + ): + """ + Generate samples from the model. + + :param model: the model module. + :param shape: the shape of the samples, (N, C, H, W). + :param noise: if specified, the noise from the encoder to sample. + Should be of the same shape as `shape`. + :param clip_denoised: if True, clip x_start predictions to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :param device: if specified, the device to create the samples on. + If not specified, use a model parameter's device. + :param progress: if True, show a tqdm progress bar. + :param const_noise: If True, will noise all samples with the same noise throughout sampling + :return: a non-differentiable batch of samples. + """ + final = None + if dump_steps is not None: + dump = [] + + if 'text' in model_kwargs['y'].keys(): + # encoding once instead of each iteration saves lots of time + model_kwargs['y']['text_embed'] = model.encode_text(model_kwargs['y']['text']) + + for i, sample in enumerate(self.p_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + skip_timesteps=skip_timesteps, + init_image=init_image, + randomize_class=randomize_class, + cond_fn_with_grad=cond_fn_with_grad, + const_noise=const_noise, + )): + if dump_steps is not None and i in dump_steps: + dump.append(deepcopy(sample["sample"])) + final = sample + if dump_steps is not None: + return dump + return final["sample"] + + def p_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + skip_timesteps=0, + init_image=None, + randomize_class=False, + cond_fn_with_grad=False, + const_noise=False, + ): + """ + Generate samples from the model and yield intermediate samples from + each timestep of diffusion. + + Arguments are the same as p_sample_loop(). 
+ Returns a generator over dicts, where each dict is the return value of + p_sample(). + """ + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + + if skip_timesteps and init_image is None: + init_image = th.zeros_like(img) + + indices = list(range(self.num_timesteps - skip_timesteps))[::-1] + + if init_image is not None: + my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0] + img = self.q_sample(init_image, my_t, img) + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for i in indices: + t = th.tensor([i] * shape[0], device=device) + if randomize_class and 'y' in model_kwargs: + model_kwargs['y'] = th.randint(low=0, high=model.num_classes, + size=model_kwargs['y'].shape, + device=model_kwargs['y'].device) + with th.no_grad(): + sample_fn = self.p_sample_with_grad if cond_fn_with_grad else self.p_sample + out = sample_fn( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + const_noise=const_noise, + ) + yield out + img = out["sample"] + + def ddim_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t-1} from the model using DDIM. + + Same usage as p_sample(). + """ + out_orig = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs) + else: + out = out_orig + + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. + eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) + + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) + sigma = ( + eta + * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) + * th.sqrt(1 - alpha_bar / alpha_bar_prev) + ) + # Equation 12. + noise = th.randn_like(x) + mean_pred = ( + out["pred_xstart"] * th.sqrt(alpha_bar_prev) + + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps + ) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + sample = mean_pred + nonzero_mask * sigma * noise + return {"sample": sample, "pred_xstart": out_orig["pred_xstart"]} + + def ddim_sample_with_grad( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t-1} from the model using DDIM. + + Same usage as p_sample(). + """ + with th.enable_grad(): + x = x.detach().requires_grad_() + out_orig = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score_with_grad(cond_fn, out_orig, x, t, + model_kwargs=model_kwargs) + else: + out = out_orig + + out["pred_xstart"] = out["pred_xstart"].detach() + + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. 
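+ # (Same DDIM update as ddim_sample above: x_{t-1} = sqrt(alpha_bar_prev) * pred_xstart + # + sqrt(1 - alpha_bar_prev - sigma^2) * eps + sigma * noise, with sigma scaled by eta + # and no noise added at t == 0.)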
+ eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) + + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) + sigma = ( + eta + * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) + * th.sqrt(1 - alpha_bar / alpha_bar_prev) + ) + # Equation 12. + noise = th.randn_like(x) + mean_pred = ( + out["pred_xstart"] * th.sqrt(alpha_bar_prev) + + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps + ) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + sample = mean_pred + nonzero_mask * sigma * noise + return {"sample": sample, "pred_xstart": out_orig["pred_xstart"].detach()} + + def ddim_reverse_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t+1} from the model using DDIM reverse ODE. + """ + assert eta == 0.0, "Reverse ODE only for deterministic path" + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. + eps = ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x + - out["pred_xstart"] + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape) + alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape) + + # Equation 12. reversed + mean_pred = ( + out["pred_xstart"] * th.sqrt(alpha_bar_next) + + th.sqrt(1 - alpha_bar_next) * eps + ) + + return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]} + + def ddim_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + skip_timesteps=0, + init_image=None, + randomize_class=False, + cond_fn_with_grad=False, + dump_steps=None, + const_noise=False, + ): + """ + Generate samples from the model using DDIM. + + Same usage as p_sample_loop(). + """ + if dump_steps is not None: + raise NotImplementedError() + if const_noise == True: + raise NotImplementedError() + + final = None + for sample in self.ddim_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + eta=eta, + skip_timesteps=skip_timesteps, + init_image=init_image, + randomize_class=randomize_class, + cond_fn_with_grad=cond_fn_with_grad, + ): + final = sample + return final["sample"] + + def ddim_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + skip_timesteps=0, + init_image=None, + randomize_class=False, + cond_fn_with_grad=False, + ): + """ + Use DDIM to sample from the model and yield intermediate samples from + each timestep of DDIM. + + Same usage as p_sample_loop_progressive(). 
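+ With eta=0.0 each update is deterministic; when skip_timesteps > 0 the loop starts + from a noised copy of init_image (zeros if none is given) instead of pure Gaussian noise.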
+ """ + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + + if skip_timesteps and init_image is None: + init_image = th.zeros_like(img) + + indices = list(range(self.num_timesteps - skip_timesteps))[::-1] + + if init_image is not None: + my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0] + img = self.q_sample(init_image, my_t, img) + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for i in indices: + t = th.tensor([i] * shape[0], device=device) + if randomize_class and 'y' in model_kwargs: + model_kwargs['y'] = th.randint(low=0, high=model.num_classes, + size=model_kwargs['y'].shape, + device=model_kwargs['y'].device) + with th.no_grad(): + sample_fn = self.ddim_sample_with_grad if cond_fn_with_grad else self.ddim_sample + out = sample_fn( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + eta=eta, + ) + yield out + img = out["sample"] + + def plms_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + cond_fn_with_grad=False, + order=2, + old_out=None, + ): + """ + Sample x_{t-1} from the model using Pseudo Linear Multistep. + + Same usage as p_sample(). + """ + if not int(order) or not 1 <= order <= 4: + raise ValueError('order is invalid (should be int from 1-4).') + + def get_model_output(x, t): + with th.set_grad_enabled(cond_fn_with_grad and cond_fn is not None): + x = x.detach().requires_grad_() if cond_fn_with_grad else x + out_orig = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + if cond_fn_with_grad: + out = self.condition_score_with_grad(cond_fn, out_orig, x, t, model_kwargs=model_kwargs) + x = x.detach() + else: + out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs) + else: + out = out_orig + + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. 
+ eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) + return eps, out, out_orig + + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) + eps, out, out_orig = get_model_output(x, t) + + if order > 1 and old_out is None: + # Pseudo Improved Euler + old_eps = [eps] + mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps + eps_2, _, _ = get_model_output(mean_pred, t - 1) + eps_prime = (eps + eps_2) / 2 + pred_prime = self._predict_xstart_from_eps(x, t, eps_prime) + mean_pred = pred_prime * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps_prime + else: + # Pseudo Linear Multistep (Adams-Bashforth) + old_eps = old_out["old_eps"] + old_eps.append(eps) + cur_order = min(order, len(old_eps)) + if cur_order == 1: + eps_prime = old_eps[-1] + elif cur_order == 2: + eps_prime = (3 * old_eps[-1] - old_eps[-2]) / 2 + elif cur_order == 3: + eps_prime = (23 * old_eps[-1] - 16 * old_eps[-2] + 5 * old_eps[-3]) / 12 + elif cur_order == 4: + eps_prime = (55 * old_eps[-1] - 59 * old_eps[-2] + 37 * old_eps[-3] - 9 * old_eps[-4]) / 24 + else: + raise RuntimeError('cur_order is invalid.') + pred_prime = self._predict_xstart_from_eps(x, t, eps_prime) + mean_pred = pred_prime * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev) * eps_prime + + if len(old_eps) >= order: + old_eps.pop(0) + + nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + sample = mean_pred * nonzero_mask + out["pred_xstart"] * (1 - nonzero_mask) + + return {"sample": sample, "pred_xstart": out_orig["pred_xstart"], "old_eps": old_eps} + + def plms_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + skip_timesteps=0, + init_image=None, + randomize_class=False, + cond_fn_with_grad=False, + order=2, + ): + """ + Generate samples from the model using Pseudo Linear Multistep. + + Same usage as p_sample_loop(). + """ + final = None + for sample in self.plms_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + skip_timesteps=skip_timesteps, + init_image=init_image, + randomize_class=randomize_class, + cond_fn_with_grad=cond_fn_with_grad, + order=order, + ): + final = sample + return final["sample"] + + def plms_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + skip_timesteps=0, + init_image=None, + randomize_class=False, + cond_fn_with_grad=False, + order=2, + ): + """ + Use PLMS to sample from the model and yield intermediate samples from each + timestep of PLMS. + + Same usage as p_sample_loop_progressive(). + """ + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + + if skip_timesteps and init_image is None: + init_image = th.zeros_like(img) + + indices = list(range(self.num_timesteps - skip_timesteps))[::-1] + + if init_image is not None: + my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0] + img = self.q_sample(init_image, my_t, img) + + if progress: + # Lazy import so that we don't depend on tqdm. 
+ from tqdm.auto import tqdm + + indices = tqdm(indices) + + old_out = None + + for i in indices: + t = th.tensor([i] * shape[0], device=device) + if randomize_class and 'y' in model_kwargs: + model_kwargs['y'] = th.randint(low=0, high=model.num_classes, + size=model_kwargs['y'].shape, + device=model_kwargs['y'].device) + with th.no_grad(): + out = self.plms_sample( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + cond_fn_with_grad=cond_fn_with_grad, + order=order, + old_out=old_out, + ) + yield out + old_out = out + img = out["sample"] + + def _vb_terms_bpd( + self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None + ): + """ + Get a term for the variational lower-bound. + + The resulting units are bits (rather than nats, as one might expect). + This allows for comparison to other papers. + + :return: a dict with the following keys: + - 'output': a shape [N] tensor of NLLs or KLs. + - 'pred_xstart': the x_0 predictions. + """ + true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance( + x_start=x_start, x_t=x_t, t=t + ) + out = self.p_mean_variance( + model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs + ) + kl = normal_kl( + true_mean, true_log_variance_clipped, out["mean"], out["log_variance"] + ) + kl = mean_flat(kl) / np.log(2.0) + + decoder_nll = -discretized_gaussian_log_likelihood( + x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] + ) + assert decoder_nll.shape == x_start.shape + decoder_nll = mean_flat(decoder_nll) / np.log(2.0) + + # At the first timestep return the decoder NLL, + # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)) + output = th.where((t == 0), decoder_nll, kl) + return {"output": output, "pred_xstart": out["pred_xstart"]} + + def training_losses(self, model, x_start, t, model_kwargs=None, noise=None, dataset=None): + """ + Compute training losses for a single timestep. + + :param model: the model to evaluate loss on. + :param x_start: the [N x C x ...] tensor of inputs. + :param t: a batch of timestep indices. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :param noise: if specified, the specific Gaussian noise to try to remove. + :return: a dict with the key "loss" containing a tensor of shape [N]. + Some mean or variance settings may also have other keys. 
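+                 In this codebase the MSE branch additionally computes the
+                 geometric terms below (xyz reconstruction, velocity, foot
+                 contact, target location), each weighted by its lambda_*
+                 coefficient in the final "loss".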
+ """ + + # enc = model.model._modules['module'] + enc = model.model + mask = model_kwargs['y']['mask'] + get_xyz = lambda sample: enc.rot2xyz(sample, mask=None, pose_rep=enc.pose_rep, translation=enc.translation, + glob=enc.glob, + # jointstype='vertices', # 3.4 iter/sec # USED ALSO IN MotionCLIP + jointstype='smpl', # 3.4 iter/sec + vertstrans=False) + + if model_kwargs is None: + model_kwargs = {} + if noise is None: + noise = th.randn_like(x_start) + x_t = self.q_sample(x_start, t, noise=noise) + + terms = {} + + if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL: + terms["loss"] = self._vb_terms_bpd( + model=model, + x_start=x_start, + x_t=x_t, + t=t, + clip_denoised=False, + model_kwargs=model_kwargs, + )["output"] + if self.loss_type == LossType.RESCALED_KL: + terms["loss"] *= self.num_timesteps + elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE: + model_output = model(x_t, self._scale_timesteps(t), **model_kwargs) + + if self.model_var_type in [ + ModelVarType.LEARNED, + ModelVarType.LEARNED_RANGE, + ]: + B, C = x_t.shape[:2] + assert model_output.shape == (B, C * 2, *x_t.shape[2:]) + model_output, model_var_values = th.split(model_output, C, dim=1) + # Learn the variance using the variational bound, but don't let + # it affect our mean prediction. + frozen_out = th.cat([model_output.detach(), model_var_values], dim=1) + terms["vb"] = self._vb_terms_bpd( + model=lambda *args, r=frozen_out: r, + x_start=x_start, + x_t=x_t, + t=t, + clip_denoised=False, + )["output"] + if self.loss_type == LossType.RESCALED_MSE: + # Divide by 1000 for equivalence with initial implementation. + # Without a factor of 1/1000, the VB term hurts the MSE term. + terms["vb"] *= self.num_timesteps / 1000.0 + + target = { + ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance( + x_start=x_start, x_t=x_t, t=t + )[0], + ModelMeanType.START_X: x_start, + ModelMeanType.EPSILON: noise, + }[self.model_mean_type] + assert model_output.shape == target.shape == x_start.shape # [bs, njoints, nfeats, nframes] + + terms["rot_mse"] = self.masked_l2(target, model_output, mask) # mean_flat(rot_mse) + + target_xyz, model_output_xyz = None, None + + if self.lambda_rcxyz > 0.: + target_xyz = get_xyz(target) # [bs, nvertices(vertices)/njoints(smpl), 3, nframes] + model_output_xyz = get_xyz(model_output) # [bs, nvertices, 3, nframes] + terms["rcxyz_mse"] = self.masked_l2(target_xyz, model_output_xyz, mask) # mean_flat((target_xyz - model_output_xyz) ** 2) + + if self.lambda_vel_rcxyz > 0.: + if self.data_rep == 'rot6d' and dataset.dataname in ['humanact12', 'uestc']: + target_xyz = get_xyz(target) if target_xyz is None else target_xyz + model_output_xyz = get_xyz(model_output) if model_output_xyz is None else model_output_xyz + target_xyz_vel = (target_xyz[:, :, :, 1:] - target_xyz[:, :, :, :-1]) + model_output_xyz_vel = (model_output_xyz[:, :, :, 1:] - model_output_xyz[:, :, :, :-1]) + terms["vel_xyz_mse"] = self.masked_l2(target_xyz_vel, model_output_xyz_vel, mask[:, :, :, 1:]) + + if self.lambda_fc > 0.: + torch.autograd.set_detect_anomaly(True) + if self.data_rep == 'rot6d' and dataset.dataname in ['humanact12', 'uestc']: + target_xyz = get_xyz(target) if target_xyz is None else target_xyz + model_output_xyz = get_xyz(model_output) if model_output_xyz is None else model_output_xyz + # 'L_Ankle', # 7, 'R_Ankle', # 8 , 'L_Foot', # 10, 'R_Foot', # 11 + l_ankle_idx, r_ankle_idx, l_foot_idx, r_foot_idx = 7, 8, 10, 11 + relevant_joints = [l_ankle_idx, l_foot_idx, 
r_ankle_idx, r_foot_idx] + gt_joint_xyz = target_xyz[:, relevant_joints, :, :] # [BatchSize, 4, 3, Frames] + gt_joint_vel = torch.linalg.norm(gt_joint_xyz[:, :, :, 1:] - gt_joint_xyz[:, :, :, :-1], axis=2) # [BatchSize, 4, Frames] + fc_mask = torch.unsqueeze((gt_joint_vel <= 0.01), dim=2).repeat(1, 1, 3, 1) + pred_joint_xyz = model_output_xyz[:, relevant_joints, :, :] # [BatchSize, 4, 3, Frames] + pred_vel = pred_joint_xyz[:, :, :, 1:] - pred_joint_xyz[:, :, :, :-1] + pred_vel[~fc_mask] = 0 + terms["fc"] = self.masked_l2(pred_vel, + torch.zeros(pred_vel.shape, device=pred_vel.device), + mask[:, :, :, 1:]) + if self.lambda_vel > 0.: + target_vel = (target[..., 1:] - target[..., :-1]) + model_output_vel = (model_output[..., 1:] - model_output[..., :-1]) + terms["vel_mse"] = self.masked_l2(target_vel[:, :-1, :, :], # Remove last joint, is the root location! + model_output_vel[:, :-1, :, :], + mask[:, :, :, 1:]) # mean_flat((target_vel - model_output_vel) ** 2) + + if self.lambda_target_loc > 0.: + assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for now!' + ref_target = model_kwargs['y']['target_cond'] + pred_target = get_target_location(model_output, dataset.mean_gpu, dataset.std_gpu, + model_kwargs['y']['lengths'], dataset.t2m_dataset.opt.joints_num, model.all_goal_joint_names, + model_kwargs['y']['target_joint_names'], model_kwargs['y']['is_heading']) + terms["target_loc"] = masked_goal_l2(pred_target, ref_target, model_kwargs['y'], model.all_goal_joint_names) + + + terms["loss"] = terms["rot_mse"] + terms.get('vb', 0.) +\ + (self.lambda_vel * terms.get('vel_mse', 0.)) +\ + (self.lambda_rcxyz * terms.get('rcxyz_mse', 0.)) + \ + (self.lambda_target_loc * terms.get('target_loc', 0.)) + \ + (self.lambda_fc * terms.get('fc', 0.)) + + else: + raise NotImplementedError(self.loss_type) + + return terms + + def fc_loss_rot_repr(self, gt_xyz, pred_xyz, mask): + def to_np_cpu(x): + return x.detach().cpu().numpy() + """ + pose_xyz: SMPL batch tensor of shape: [BatchSize, 24, 3, Frames] + """ + # 'L_Ankle', # 7, 'R_Ankle', # 8 , 'L_Foot', # 10, 'R_Foot', # 11 + + l_ankle_idx, r_ankle_idx = 7, 8 + l_foot_idx, r_foot_idx = 10, 11 + """ Contact calculated by 'Kfir Method' Commented code)""" + # contact_signal = torch.zeros((pose_xyz.shape[0], pose_xyz.shape[3], 2), device=pose_xyz.device) # [BatchSize, Frames, 2] + # left_xyz = 0.5 * (pose_xyz[:, l_ankle_idx, :, :] + pose_xyz[:, l_foot_idx, :, :]) # [BatchSize, 3, Frames] + # right_xyz = 0.5 * (pose_xyz[:, r_ankle_idx, :, :] + pose_xyz[:, r_foot_idx, :, :]) + # left_z, right_z = left_xyz[:, 2, :], right_xyz[:, 2, :] # [BatchSize, Frames] + # left_velocity = torch.linalg.norm(left_xyz[:, :, 2:] - left_xyz[:, :, :-2], axis=1) # [BatchSize, Frames] + # right_velocity = torch.linalg.norm(left_xyz[:, :, 2:] - left_xyz[:, :, :-2], axis=1) + # + # left_z_mask = left_z <= torch.mean(torch.sort(left_z)[0][:, :left_z.shape[1] // 5], axis=-1) + # left_z_mask = torch.stack([left_z_mask, left_z_mask], dim=-1) # [BatchSize, Frames, 2] + # left_z_mask[:, :, 1] = False # Blank right side + # contact_signal[left_z_mask] = 0.4 + # + # right_z_mask = right_z <= torch.mean(torch.sort(right_z)[0][:, :right_z.shape[1] // 5], axis=-1) + # right_z_mask = torch.stack([right_z_mask, right_z_mask], dim=-1) # [BatchSize, Frames, 2] + # right_z_mask[:, :, 0] = False # Blank left side + # contact_signal[right_z_mask] = 0.4 + # contact_signal[left_z <= (torch.mean(torch.sort(left_z)[:left_z.shape[0] // 5]) + 20), 0] = 1 + # 
contact_signal[right_z <= (torch.mean(torch.sort(right_z)[:right_z.shape[0] // 5]) + 20), 1] = 1 + + # plt.plot(to_np_cpu(left_z[0]), label='left_z') + # plt.plot(to_np_cpu(left_velocity[0]), label='left_velocity') + # plt.plot(to_np_cpu(contact_signal[0, :, 0]), label='left_fc') + # plt.grid() + # plt.legend() + # plt.show() + # plt.plot(to_np_cpu(right_z[0]), label='right_z') + # plt.plot(to_np_cpu(right_velocity[0]), label='right_velocity') + # plt.plot(to_np_cpu(contact_signal[0, :, 1]), label='right_fc') + # plt.grid() + # plt.legend() + # plt.show() + + gt_joint_xyz = gt_xyz[:, [l_ankle_idx, l_foot_idx, r_ankle_idx, r_foot_idx], :, :] # [BatchSize, 4, 3, Frames] + gt_joint_vel = torch.linalg.norm(gt_joint_xyz[:, :, :, 1:] - gt_joint_xyz[:, :, :, :-1], axis=2) # [BatchSize, 4, Frames] + fc_mask = (gt_joint_vel <= 0.01) + pred_joint_xyz = pred_xyz[:, [l_ankle_idx, l_foot_idx, r_ankle_idx, r_foot_idx], :, :] # [BatchSize, 4, 3, Frames] + pred_joint_vel = torch.linalg.norm(pred_joint_xyz[:, :, :, 1:] - pred_joint_xyz[:, :, :, :-1], axis=2) # [BatchSize, 4, Frames] + pred_joint_vel[~fc_mask] = 0 # Blank non-contact velocities frames. [BS,4,FRAMES] + pred_joint_vel = torch.unsqueeze(pred_joint_vel, dim=2) + + """DEBUG CODE""" + # print(f'mask: {mask.shape}') + # print(f'pred_joint_vel: {pred_joint_vel.shape}') + # plt.title(f'Joint: {joint_idx}') + # plt.plot(to_np_cpu(gt_joint_vel[0]), label='velocity') + # plt.plot(to_np_cpu(fc_mask[0]), label='fc') + # plt.grid() + # plt.legend() + # plt.show() + return self.masked_l2(pred_joint_vel, torch.zeros(pred_joint_vel.shape, device=pred_joint_vel.device), + mask[:, :, :, 1:]) + # TODO - NOT USED YET, JUST COMMITING TO NOT DELETE THIS AND KEEP INITIAL IMPLEMENTATION, NOT DONE! + def foot_contact_loss_humanml3d(self, target, model_output): + # root_rot_velocity (B, seq_len, 1) + # root_linear_velocity (B, seq_len, 2) + # root_y (B, seq_len, 1) + # ric_data (B, seq_len, (joint_num - 1)*3) , XYZ + # rot_data (B, seq_len, (joint_num - 1)*6) , 6D + # local_velocity (B, seq_len, joint_num*3) , XYZ + # foot contact (B, seq_len, 4) , + + target_fc = target[:, -4:, :, :] + root_rot_velocity = target[:, :1, :, :] + root_linear_velocity = target[:, 1:3, :, :] + root_y = target[:, 3:4, :, :] + ric_data = target[:, 4:67, :, :] # 4+(3*21)=67 + rot_data = target[:, 67:193, :, :] # 67+(6*21)=193 + local_velocity = target[:, 193:259, :, :] # 193+(3*22)=259 + contact = target[:, 259:, :, :] # 193+(3*22)=259 + contact_mask_gt = contact > 0.5 # contact mask order for indexes are fid_l [7, 10], fid_r [8, 11] + vel_lf_7 = local_velocity[:, 7 * 3:8 * 3, :, :] + vel_rf_8 = local_velocity[:, 8 * 3:9 * 3, :, :] + vel_lf_10 = local_velocity[:, 10 * 3:11 * 3, :, :] + vel_rf_11 = local_velocity[:, 11 * 3:12 * 3, :, :] + + calc_vel_lf_7 = ric_data[:, 6 * 3:7 * 3, :, 1:] - ric_data[:, 6 * 3:7 * 3, :, :-1] + calc_vel_rf_8 = ric_data[:, 7 * 3:8 * 3, :, 1:] - ric_data[:, 7 * 3:8 * 3, :, :-1] + calc_vel_lf_10 = ric_data[:, 9 * 3:10 * 3, :, 1:] - ric_data[:, 9 * 3:10 * 3, :, :-1] + calc_vel_rf_11 = ric_data[:, 10 * 3:11 * 3, :, 1:] - ric_data[:, 10 * 3:11 * 3, :, :-1] + + # vel_foots = torch.stack([vel_lf_7, vel_lf_10, vel_rf_8, vel_rf_11], dim=1) + for chosen_vel_foot_calc, chosen_vel_foot, joint_idx, contact_mask_idx in zip( + [calc_vel_lf_7, calc_vel_rf_8, calc_vel_lf_10, calc_vel_rf_11], + [vel_lf_7, vel_lf_10, vel_rf_8, vel_rf_11], + [7, 10, 8, 11], + [0, 1, 2, 3]): + tmp_mask_gt = contact_mask_gt[:, contact_mask_idx, :, :].cpu().detach().numpy().reshape(-1).astype(int) + 
chosen_vel_norm = np.linalg.norm(chosen_vel_foot.cpu().detach().numpy().reshape((3, -1)), axis=0) + chosen_vel_calc_norm = np.linalg.norm(chosen_vel_foot_calc.cpu().detach().numpy().reshape((3, -1)), + axis=0) + + print(tmp_mask_gt.shape) + print(chosen_vel_foot.shape) + print(chosen_vel_calc_norm.shape) + import matplotlib.pyplot as plt + plt.plot(tmp_mask_gt, label='FC mask') + plt.plot(chosen_vel_norm, label='Vel. XYZ norm (from vector)') + plt.plot(chosen_vel_calc_norm, label='Vel. XYZ norm (calculated diff XYZ)') + + plt.title(f'FC idx {contact_mask_idx}, Joint Index {joint_idx}') + plt.legend() + plt.show() + # print(vel_foots.shape) + return 0 + # TODO - NOT USED YET, JUST COMMITING TO NOT DELETE THIS AND KEEP INITIAL IMPLEMENTATION, NOT DONE! + def velocity_consistency_loss_humanml3d(self, target, model_output): + # root_rot_velocity (B, seq_len, 1) + # root_linear_velocity (B, seq_len, 2) + # root_y (B, seq_len, 1) + # ric_data (B, seq_len, (joint_num - 1)*3) , XYZ + # rot_data (B, seq_len, (joint_num - 1)*6) , 6D + # local_velocity (B, seq_len, joint_num*3) , XYZ + # foot contact (B, seq_len, 4) , + + target_fc = target[:, -4:, :, :] + root_rot_velocity = target[:, :1, :, :] + root_linear_velocity = target[:, 1:3, :, :] + root_y = target[:, 3:4, :, :] + ric_data = target[:, 4:67, :, :] # 4+(3*21)=67 + rot_data = target[:, 67:193, :, :] # 67+(6*21)=193 + local_velocity = target[:, 193:259, :, :] # 193+(3*22)=259 + contact = target[:, 259:, :, :] # 193+(3*22)=259 + + calc_vel_from_xyz = ric_data[:, :, :, 1:] - ric_data[:, :, :, :-1] + velocity_from_vector = local_velocity[:, 3:, :, 1:] # Slicing out root + r_rot_quat, r_pos = motion_process.recover_root_rot_pos(target.permute(0, 2, 3, 1).type(th.FloatTensor)) + print(f'r_rot_quat: {r_rot_quat.shape}') + print(f'calc_vel_from_xyz: {calc_vel_from_xyz.shape}') + calc_vel_from_xyz = calc_vel_from_xyz.permute(0, 2, 3, 1) + calc_vel_from_xyz = calc_vel_from_xyz.reshape((1, 1, -1, 21, 3)).type(th.FloatTensor) + r_rot_quat_adapted = r_rot_quat[..., :-1, None, :].repeat((1,1,1,21,1)).to(calc_vel_from_xyz.device) + print(f'calc_vel_from_xyz: {calc_vel_from_xyz.shape} , {calc_vel_from_xyz.device}') + print(f'r_rot_quat_adapted: {r_rot_quat_adapted.shape}, {r_rot_quat_adapted.device}') + + calc_vel_from_xyz = motion_process.qrot(r_rot_quat_adapted, calc_vel_from_xyz) + calc_vel_from_xyz = calc_vel_from_xyz.reshape((1, 1, -1, 21 * 3)) + calc_vel_from_xyz = calc_vel_from_xyz.permute(0, 3, 1, 2) + print(f'calc_vel_from_xyz: {calc_vel_from_xyz.shape} , {calc_vel_from_xyz.device}') + + import matplotlib.pyplot as plt + for i in range(21): + plt.plot(np.linalg.norm(calc_vel_from_xyz[:,i*3:(i+1)*3,:,:].cpu().detach().numpy().reshape((3, -1)), axis=0), label='Calc Vel') + plt.plot(np.linalg.norm(velocity_from_vector[:,i*3:(i+1)*3,:,:].cpu().detach().numpy().reshape((3, -1)), axis=0), label='Vector Vel') + plt.title(f'Joint idx: {i}') + plt.legend() + plt.show() + print(calc_vel_from_xyz.shape) + print(velocity_from_vector.shape) + diff = calc_vel_from_xyz-velocity_from_vector + print(np.linalg.norm(diff.cpu().detach().numpy().reshape((63, -1)), axis=0)) + + return 0 + + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + + This term can't be optimized, as it only depends on the encoder. + + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. 
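+
+        Concretely, this is KL(q(x_T | x_0) || N(0, I)), converted from nats
+        to bits by dividing by log(2).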
+ """ + batch_size = x_start.shape[0] + t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl( + mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 + ) + return mean_flat(kl_prior) / np.log(2.0) + + def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None): + """ + Compute the entire variational lower-bound, measured in bits-per-dim, + as well as other related quantities. + + :param model: the model to evaluate loss on. + :param x_start: the [N x C x ...] tensor of inputs. + :param clip_denoised: if True, clip denoised samples. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + + :return: a dict containing the following keys: + - total_bpd: the total variational lower-bound, per batch element. + - prior_bpd: the prior term in the lower-bound. + - vb: an [N x T] tensor of terms in the lower-bound. + - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep. + - mse: an [N x T] tensor of epsilon MSEs for each timestep. + """ + device = x_start.device + batch_size = x_start.shape[0] + + vb = [] + xstart_mse = [] + mse = [] + for t in list(range(self.num_timesteps))[::-1]: + t_batch = th.tensor([t] * batch_size, device=device) + noise = th.randn_like(x_start) + x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise) + # Calculate VLB term at the current timestep + with th.no_grad(): + out = self._vb_terms_bpd( + model, + x_start=x_start, + x_t=x_t, + t=t_batch, + clip_denoised=clip_denoised, + model_kwargs=model_kwargs, + ) + vb.append(out["output"]) + xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2)) + eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"]) + mse.append(mean_flat((eps - noise) ** 2)) + + vb = th.stack(vb, dim=1) + xstart_mse = th.stack(xstart_mse, dim=1) + mse = th.stack(mse, dim=1) + + prior_bpd = self._prior_bpd(x_start) + total_bpd = vb.sum(dim=1) + prior_bpd + return { + "total_bpd": total_bpd, + "prior_bpd": prior_bpd, + "vb": vb, + "xstart_mse": xstart_mse, + "mse": mse, + } + + +def _extract_into_tensor(arr, timesteps, broadcast_shape): + """ + Extract values from a 1-D numpy array for a batch of indices. + + :param arr: the 1-D numpy array. + :param timesteps: a tensor of indices into the array to extract. + :param broadcast_shape: a larger shape of K dimensions with the batch + dimension equal to the length of timesteps. + :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims. 
+ """ + res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float() + while len(res.shape) < len(broadcast_shape): + res = res[..., None] + return res.expand(broadcast_shape) diff --git a/motion_diffusion_model/diffusion/logger.py b/motion_diffusion_model/diffusion/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d856dcfea6b56a2ee8d37b286887430dbfac30 --- /dev/null +++ b/motion_diffusion_model/diffusion/logger.py @@ -0,0 +1,495 @@ +""" +Logger copied from OpenAI baselines to avoid extra RL-based dependencies: +https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py +""" + +import os +import sys +import shutil +import os.path as osp +import json +import time +import datetime +import tempfile +import warnings +from collections import defaultdict +from contextlib import contextmanager + +DEBUG = 10 +INFO = 20 +WARN = 30 +ERROR = 40 + +DISABLED = 50 + + +class KVWriter(object): + def writekvs(self, kvs): + raise NotImplementedError + + +class SeqWriter(object): + def writeseq(self, seq): + raise NotImplementedError + + +class HumanOutputFormat(KVWriter, SeqWriter): + def __init__(self, filename_or_file): + if isinstance(filename_or_file, str): + self.file = open(filename_or_file, "wt") + self.own_file = True + else: + assert hasattr(filename_or_file, "read"), ( + "expected file or str, got %s" % filename_or_file + ) + self.file = filename_or_file + self.own_file = False + + def writekvs(self, kvs): + # Create strings for printing + key2str = {} + for (key, val) in sorted(kvs.items()): + if hasattr(val, "__float__"): + valstr = "%-8.3g" % val + else: + valstr = str(val) + key2str[self._truncate(key)] = self._truncate(valstr) + + # Find max widths + if len(key2str) == 0: + print("WARNING: tried to write empty key-value dict") + return + else: + keywidth = max(map(len, key2str.keys())) + valwidth = max(map(len, key2str.values())) + + # Write out the data + dashes = "-" * (keywidth + valwidth + 7) + lines = [dashes] + for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()): + lines.append( + "| %s%s | %s%s |" + % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val))) + ) + lines.append(dashes) + self.file.write("\n".join(lines) + "\n") + + # Flush the output to the file + self.file.flush() + + def _truncate(self, s): + maxlen = 30 + return s[: maxlen - 3] + "..." 
if len(s) > maxlen else s + + def writeseq(self, seq): + seq = list(seq) + for (i, elem) in enumerate(seq): + self.file.write(elem) + if i < len(seq) - 1: # add space unless this is the last one + self.file.write(" ") + self.file.write("\n") + self.file.flush() + + def close(self): + if self.own_file: + self.file.close() + + +class JSONOutputFormat(KVWriter): + def __init__(self, filename): + self.file = open(filename, "wt") + + def writekvs(self, kvs): + for k, v in sorted(kvs.items()): + if hasattr(v, "dtype"): + kvs[k] = float(v) + self.file.write(json.dumps(kvs) + "\n") + self.file.flush() + + def close(self): + self.file.close() + + +class CSVOutputFormat(KVWriter): + def __init__(self, filename): + self.file = open(filename, "w+t") + self.keys = [] + self.sep = "," + + def writekvs(self, kvs): + # Add our current row to the history + extra_keys = list(kvs.keys() - self.keys) + extra_keys.sort() + if extra_keys: + self.keys.extend(extra_keys) + self.file.seek(0) + lines = self.file.readlines() + self.file.seek(0) + for (i, k) in enumerate(self.keys): + if i > 0: + self.file.write(",") + self.file.write(k) + self.file.write("\n") + for line in lines[1:]: + self.file.write(line[:-1]) + self.file.write(self.sep * len(extra_keys)) + self.file.write("\n") + for (i, k) in enumerate(self.keys): + if i > 0: + self.file.write(",") + v = kvs.get(k) + if v is not None: + self.file.write(str(v)) + self.file.write("\n") + self.file.flush() + + def close(self): + self.file.close() + + +class TensorBoardOutputFormat(KVWriter): + """ + Dumps key/value pairs into TensorBoard's numeric format. + """ + + def __init__(self, dir): + os.makedirs(dir, exist_ok=True) + self.dir = dir + self.step = 1 + prefix = "events" + path = osp.join(osp.abspath(dir), prefix) + import tensorflow as tf + from tensorflow.python import pywrap_tensorflow + from tensorflow.core.util import event_pb2 + from tensorflow.python.util import compat + + self.tf = tf + self.event_pb2 = event_pb2 + self.pywrap_tensorflow = pywrap_tensorflow + self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path)) + + def writekvs(self, kvs): + def summary_val(k, v): + kwargs = {"tag": k, "simple_value": float(v)} + return self.tf.Summary.Value(**kwargs) + + summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()]) + event = self.event_pb2.Event(wall_time=time.time(), summary=summary) + event.step = ( + self.step + ) # is there any reason why you'd want to specify the step? 
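+        # The step counter below is advanced once per writekvs() call, so the
+        # TensorBoard x-axis effectively counts logger dumps.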
+ self.writer.WriteEvent(event) + self.writer.Flush() + self.step += 1 + + def close(self): + if self.writer: + self.writer.Close() + self.writer = None + + +def make_output_format(format, ev_dir, log_suffix=""): + os.makedirs(ev_dir, exist_ok=True) + if format == "stdout": + return HumanOutputFormat(sys.stdout) + elif format == "log": + return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix)) + elif format == "json": + return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix)) + elif format == "csv": + return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix)) + elif format == "tensorboard": + return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix)) + else: + raise ValueError("Unknown format specified: %s" % (format,)) + + +# ================================================================ +# API +# ================================================================ + + +def logkv(key, val): + """ + Log a value of some diagnostic + Call this once for each diagnostic quantity, each iteration + If called many times, last value will be used. + """ + get_current().logkv(key, val) + + +def logkv_mean(key, val): + """ + The same as logkv(), but if called many times, values averaged. + """ + get_current().logkv_mean(key, val) + + +def logkvs(d): + """ + Log a dictionary of key-value pairs + """ + for (k, v) in d.items(): + logkv(k, v) + + +def dumpkvs(): + """ + Write all of the diagnostics from the current iteration + """ + return get_current().dumpkvs() + + +def getkvs(): + return get_current().name2val + + +def log(*args, level=INFO): + """ + Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). + """ + get_current().log(*args, level=level) + + +def debug(*args): + log(*args, level=DEBUG) + + +def info(*args): + log(*args, level=INFO) + + +def warn(*args): + log(*args, level=WARN) + + +def error(*args): + log(*args, level=ERROR) + + +def set_level(level): + """ + Set logging threshold on current logger. + """ + get_current().set_level(level) + + +def set_comm(comm): + get_current().set_comm(comm) + + +def get_dir(): + """ + Get directory that log files are being written to. + will be None if there is no output directory (i.e., if you didn't call start) + """ + return get_current().get_dir() + + +record_tabular = logkv +dump_tabular = dumpkvs + + +@contextmanager +def profile_kv(scopename): + logkey = "wait_" + scopename + tstart = time.time() + try: + yield + finally: + get_current().name2val[logkey] += time.time() - tstart + + +def profile(n): + """ + Usage: + @profile("my_func") + def my_func(): code + """ + + def decorator_with_name(func): + def func_wrapper(*args, **kwargs): + with profile_kv(n): + return func(*args, **kwargs) + + return func_wrapper + + return decorator_with_name + + +# ================================================================ +# Backend +# ================================================================ + + +def get_current(): + if Logger.CURRENT is None: + _configure_default_logger() + + return Logger.CURRENT + + +class Logger(object): + DEFAULT = None # A logger with no output files. 
(See right below class definition) + # So that you can still log to the terminal without setting up any output files + CURRENT = None # Current logger being used by the free functions above + + def __init__(self, dir, output_formats, comm=None): + self.name2val = defaultdict(float) # values this iteration + self.name2cnt = defaultdict(int) + self.level = INFO + self.dir = dir + self.output_formats = output_formats + self.comm = comm + + # Logging API, forwarded + # ---------------------------------------- + def logkv(self, key, val): + self.name2val[key] = val + + def logkv_mean(self, key, val): + oldval, cnt = self.name2val[key], self.name2cnt[key] + self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1) + self.name2cnt[key] = cnt + 1 + + def dumpkvs(self): + if self.comm is None: + d = self.name2val + else: + d = mpi_weighted_mean( + self.comm, + { + name: (val, self.name2cnt.get(name, 1)) + for (name, val) in self.name2val.items() + }, + ) + if self.comm.rank != 0: + d["dummy"] = 1 # so we don't get a warning about empty dict + out = d.copy() # Return the dict for unit testing purposes + for fmt in self.output_formats: + if isinstance(fmt, KVWriter): + fmt.writekvs(d) + self.name2val.clear() + self.name2cnt.clear() + return out + + def log(self, *args, level=INFO): + if self.level <= level: + self._do_log(args) + + # Configuration + # ---------------------------------------- + def set_level(self, level): + self.level = level + + def set_comm(self, comm): + self.comm = comm + + def get_dir(self): + return self.dir + + def close(self): + for fmt in self.output_formats: + fmt.close() + + # Misc + # ---------------------------------------- + def _do_log(self, args): + for fmt in self.output_formats: + if isinstance(fmt, SeqWriter): + fmt.writeseq(map(str, args)) + + +def get_rank_without_mpi_import(): + # check environment variables here instead of importing mpi4py + # to avoid calling MPI_Init() when this module is imported + for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]: + if varname in os.environ: + return int(os.environ[varname]) + return 0 + + +def mpi_weighted_mean(comm, local_name2valcount): + """ + Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110 + Perform a weighted average over dicts that are each on a different node + Input: local_name2valcount: dict mapping key -> (value, count) + Returns: key -> mean + """ + all_name2valcount = comm.gather(local_name2valcount) + if comm.rank == 0: + name2sum = defaultdict(float) + name2count = defaultdict(float) + for n2vc in all_name2valcount: + for (name, (val, count)) in n2vc.items(): + try: + val = float(val) + except ValueError: + if comm.rank == 0: + warnings.warn( + "WARNING: tried to compute mean on non-float {}={}".format( + name, val + ) + ) + else: + name2sum[name] += val * count + name2count[name] += count + return {name: name2sum[name] / name2count[name] for name in name2sum} + else: + return {} + + +def configure(dir=None, format_strs=None, comm=None, log_suffix=""): + """ + If comm is provided, average all numerical stats across that comm + """ + if dir is None: + dir = os.getenv("OPENAI_LOGDIR") + if dir is None: + dir = osp.join( + tempfile.gettempdir(), + datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"), + ) + assert isinstance(dir, str) + dir = os.path.expanduser(dir) + os.makedirs(os.path.expanduser(dir), exist_ok=True) + + rank = get_rank_without_mpi_import() + if rank > 0: + log_suffix = log_suffix + "-rank%03i" % 
rank + + if format_strs is None: + if rank == 0: + format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",") + else: + format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",") + format_strs = filter(None, format_strs) + output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs] + + Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm) + if output_formats: + log("Logging to %s" % dir) + + +def _configure_default_logger(): + configure() + Logger.DEFAULT = Logger.CURRENT + + +def reset(): + if Logger.CURRENT is not Logger.DEFAULT: + Logger.CURRENT.close() + Logger.CURRENT = Logger.DEFAULT + log("Reset logger") + + +@contextmanager +def scoped_configure(dir=None, format_strs=None, comm=None): + prevlogger = Logger.CURRENT + configure(dir=dir, format_strs=format_strs, comm=comm) + try: + yield + finally: + Logger.CURRENT.close() + Logger.CURRENT = prevlogger + diff --git a/motion_diffusion_model/diffusion/losses.py b/motion_diffusion_model/diffusion/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..e3fded1953584eaaf183f3d2399be545a5003e0a --- /dev/null +++ b/motion_diffusion_model/diffusion/losses.py @@ -0,0 +1,77 @@ +# This code is based on https://github.com/openai/guided-diffusion +""" +Helpers for various likelihood-based losses. These are ported from the original +Ho et al. diffusion models codebase: +https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py +""" + +import numpy as np +import torch as th + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + Compute the KL divergence between two gaussians. + + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, th.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for th.exp(). + logvar1, logvar2 = [ + x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + th.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * th.exp(-logvar2) + ) + + +def approx_standard_normal_cdf(x): + """ + A fast approximation of the cumulative distribution function of the + standard normal. + """ + return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3)))) + + +def discretized_gaussian_log_likelihood(x, *, means, log_scales): + """ + Compute the log-likelihood of a Gaussian distribution discretizing to a + given image. + + :param x: the target images. It is assumed that this was uint8 values, + rescaled to the range [-1, 1]. + :param means: the Gaussian mean Tensor. + :param log_scales: the Gaussian log stddev Tensor. + :return: a tensor like x of log probabilities (in nats). 
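+
+    Each value is scored by the Gaussian probability mass of its
+    discretization bin (width 2/255 in the [-1, 1] scale), with the lowest
+    and highest bins extended to -inf and +inf respectively.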
+ """ + assert x.shape == means.shape == log_scales.shape + centered_x = x - means + inv_stdv = th.exp(-log_scales) + plus_in = inv_stdv * (centered_x + 1.0 / 255.0) + cdf_plus = approx_standard_normal_cdf(plus_in) + min_in = inv_stdv * (centered_x - 1.0 / 255.0) + cdf_min = approx_standard_normal_cdf(min_in) + log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12)) + log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12)) + cdf_delta = cdf_plus - cdf_min + log_probs = th.where( + x < -0.999, + log_cdf_plus, + th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))), + ) + assert log_probs.shape == x.shape + return log_probs diff --git a/motion_diffusion_model/diffusion/nn.py b/motion_diffusion_model/diffusion/nn.py new file mode 100644 index 0000000000000000000000000000000000000000..41c18e7dd3d8cae1e719638e87c27f718f6a94e6 --- /dev/null +++ b/motion_diffusion_model/diffusion/nn.py @@ -0,0 +1,197 @@ +# This code is based on https://github.com/openai/guided-diffusion +""" +Various utilities for neural networks. +""" + +import math + +import torch as th +import torch.nn as nn + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * th.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def update_ema(target_params, source_params, rate=0.99): + """ + Update target parameters to be closer to those of source parameters using + an exponential moving average. + + :param target_params: the target parameter sequence. + :param source_params: the source parameter sequence. + :param rate: the EMA rate (closer to 1 means slower). + """ + for targ, src in zip(target_params, source_params): + targ.detach().mul_(rate).add_(src, alpha=1 - rate) + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + +def sum_flat(tensor): + """ + Take the sum over all non-batch dimensions. + """ + return tensor.sum(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + + :param channels: number of input channels. + :return: an nn.Module for normalization. + """ + return GroupNorm32(32, channels) + + +def timestep_embedding(timesteps, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. 
+ + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + half = dim // 2 + freqs = th.exp( + -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = th.cat([th.cos(args), th.sin(args)], dim=-1) + if dim % 2: + embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(th.autograd.Function): + @staticmethod + @th.cuda.amp.custom_fwd + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_length = length + ctx.save_for_backward(*args) + with th.no_grad(): + output_tensors = ctx.run_function(*args[:length]) + return output_tensors + + @staticmethod + @th.cuda.amp.custom_bwd + def backward(ctx, *output_grads): + args = list(ctx.saved_tensors) + + # Filter for inputs that require grad. If none, exit early. + input_indices = [i for (i, x) in enumerate(args) if x.requires_grad] + if not input_indices: + return (None, None) + tuple(None for _ in args) + + with th.enable_grad(): + for i in input_indices: + if i < ctx.input_length: + # Not sure why the OAI code does this little + # dance. It might not be necessary. + args[i] = args[i].detach().requires_grad_() + args[i] = args[i].view_as(args[i]) + output_tensors = ctx.run_function(*args[:ctx.input_length]) + + if isinstance(output_tensors, th.Tensor): + output_tensors = [output_tensors] + + # Filter for outputs that require grad. If none, exit early. + out_and_grads = [(o, g) for (o, g) in zip(output_tensors, output_grads) if o.requires_grad] + if not out_and_grads: + return (None, None) + tuple(None for _ in args) + + # Compute gradients on the filtered tensors. + computed_grads = th.autograd.grad( + [o for (o, g) in out_and_grads], + [args[i] for i in input_indices], + [g for (o, g) in out_and_grads] + ) + + # Reassemble the complete gradient tuple. + input_grads = [None for _ in args] + for (i, g) in zip(input_indices, computed_grads): + input_grads[i] = g + return (None, None) + tuple(input_grads) diff --git a/motion_diffusion_model/diffusion/resample.py b/motion_diffusion_model/diffusion/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..c82eccdcd47c468d41e7cbe02de6a731f2c9bf81 --- /dev/null +++ b/motion_diffusion_model/diffusion/resample.py @@ -0,0 +1,154 @@ +from abc import ABC, abstractmethod + +import numpy as np +import torch as th +import torch.distributed as dist + + +def create_named_schedule_sampler(name, diffusion): + """ + Create a ScheduleSampler from a library of pre-defined samplers. + + :param name: the name of the sampler. 
+ :param diffusion: the diffusion object to sample for. + """ + if name == "uniform": + return UniformSampler(diffusion) + elif name == "loss-second-moment": + return LossSecondMomentResampler(diffusion) + else: + raise NotImplementedError(f"unknown schedule sampler: {name}") + + +class ScheduleSampler(ABC): + """ + A distribution over timesteps in the diffusion process, intended to reduce + variance of the objective. + + By default, samplers perform unbiased importance sampling, in which the + objective's mean is unchanged. + However, subclasses may override sample() to change how the resampled + terms are reweighted, allowing for actual changes in the objective. + """ + + @abstractmethod + def weights(self): + """ + Get a numpy array of weights, one per diffusion step. + + The weights needn't be normalized, but must be positive. + """ + + def sample(self, batch_size, device): + """ + Importance-sample timesteps for a batch. + + :param batch_size: the number of timesteps. + :param device: the torch device to save to. + :return: a tuple (timesteps, weights): + - timesteps: a tensor of timestep indices. + - weights: a tensor of weights to scale the resulting losses. + """ + w = self.weights() + p = w / np.sum(w) + indices_np = np.random.choice(len(p), size=(batch_size,), p=p) + indices = th.from_numpy(indices_np).long().to(device) + weights_np = 1 / (len(p) * p[indices_np]) + weights = th.from_numpy(weights_np).float().to(device) + return indices, weights + + +class UniformSampler(ScheduleSampler): + def __init__(self, diffusion): + self.diffusion = diffusion + self._weights = np.ones([diffusion.num_timesteps]) + + def weights(self): + return self._weights + + +class LossAwareSampler(ScheduleSampler): + def update_with_local_losses(self, local_ts, local_losses): + """ + Update the reweighting using losses from a model. + + Call this method from each rank with a batch of timesteps and the + corresponding losses for each of those timesteps. + This method will perform synchronization to make sure all of the ranks + maintain the exact same reweighting. + + :param local_ts: an integer Tensor of timesteps. + :param local_losses: a 1D Tensor of losses. + """ + batch_sizes = [ + th.tensor([0], dtype=th.int32, device=local_ts.device) + for _ in range(dist.get_world_size()) + ] + dist.all_gather( + batch_sizes, + th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device), + ) + + # Pad all_gather batches to be the maximum batch size. + batch_sizes = [x.item() for x in batch_sizes] + max_bs = max(batch_sizes) + + timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes] + loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes] + dist.all_gather(timestep_batches, local_ts) + dist.all_gather(loss_batches, local_losses) + timesteps = [ + x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs] + ] + losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]] + self.update_with_all_losses(timesteps, losses) + + @abstractmethod + def update_with_all_losses(self, ts, losses): + """ + Update the reweighting using losses from a model. + + Sub-classes should override this method to update the reweighting + using losses from the model. + + This method directly updates the reweighting without synchronizing + between workers. It is called by update_with_local_losses from all + ranks with identical arguments. Thus, it should have deterministic + behavior to maintain state across workers. + + :param ts: a list of int timesteps. 
+        :param losses: a list of float losses, one per timestep.
+        """
+
+
+class LossSecondMomentResampler(LossAwareSampler):
+    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
+        self.diffusion = diffusion
+        self.history_per_term = history_per_term
+        self.uniform_prob = uniform_prob
+        self._loss_history = np.zeros(
+            [diffusion.num_timesteps, history_per_term], dtype=np.float64
+        )
+        # np.int was removed in recent NumPy releases; plain int keeps the
+        # original integer-counter behavior.
+        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=int)
+
+    def weights(self):
+        if not self._warmed_up():
+            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
+        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
+        weights /= np.sum(weights)
+        weights *= 1 - self.uniform_prob
+        weights += self.uniform_prob / len(weights)
+        return weights
+
+    def update_with_all_losses(self, ts, losses):
+        for t, loss in zip(ts, losses):
+            if self._loss_counts[t] == self.history_per_term:
+                # Shift out the oldest loss term.
+                self._loss_history[t, :-1] = self._loss_history[t, 1:]
+                self._loss_history[t, -1] = loss
+            else:
+                self._loss_history[t, self._loss_counts[t]] = loss
+                self._loss_counts[t] += 1
+
+    def _warmed_up(self):
+        return (self._loss_counts == self.history_per_term).all()
diff --git a/motion_diffusion_model/diffusion/respace.py b/motion_diffusion_model/diffusion/respace.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b1194d79b9c4ee5b88407df414d1524d4d31525
--- /dev/null
+++ b/motion_diffusion_model/diffusion/respace.py
@@ -0,0 +1,134 @@
+# This code is based on https://github.com/openai/guided-diffusion
+import numpy as np
+import torch as th
+
+from .gaussian_diffusion import GaussianDiffusion
+from utils.misc import wrapped_getattr
+
+
+def space_timesteps(num_timesteps, section_counts):
+    """
+    Create a list of timesteps to use from an original diffusion process,
+    given the number of timesteps we want to take from equally-sized portions
+    of the original process.
+
+    For example, if there are 300 timesteps and the section counts are [10,15,20]
+    then the first 100 timesteps are strided to be 10 timesteps, the second 100
+    are strided to be 15 timesteps, and the final 100 are strided to be 20.
+
+    If the stride is a string starting with "ddim", then the fixed striding
+    from the DDIM paper is used, and only one section is allowed.
+
+    :param num_timesteps: the number of diffusion steps in the original
+                          process to divide up.
+    :param section_counts: either a list of numbers, or a string containing
+                           comma-separated numbers, indicating the step count
+                           per section. As a special case, use "ddimN" where N
+                           is a number of steps to use the striding from the
+                           DDIM paper.
+    :return: a set of diffusion steps from the original process to use.
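+
+    For example, space_timesteps(1000, "ddim50") keeps every 20th step,
+    i.e. {0, 20, 40, ..., 980}.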
+ """ + if isinstance(section_counts, str): + if section_counts.startswith("ddim"): + desired_count = int(section_counts[len("ddim") :]) + for i in range(1, num_timesteps): + if len(range(0, num_timesteps, i)) == desired_count: + return set(range(0, num_timesteps, i)) + raise ValueError( + f"cannot create exactly {num_timesteps} steps with an integer stride" + ) + section_counts = [int(x) for x in section_counts.split(",")] + size_per = num_timesteps // len(section_counts) + extra = num_timesteps % len(section_counts) + start_idx = 0 + all_steps = [] + for i, section_count in enumerate(section_counts): + size = size_per + (1 if i < extra else 0) + if size < section_count: + raise ValueError( + f"cannot divide section of {size} steps into {section_count}" + ) + if section_count <= 1: + frac_stride = 1 + else: + frac_stride = (size - 1) / (section_count - 1) + cur_idx = 0.0 + taken_steps = [] + for _ in range(section_count): + taken_steps.append(start_idx + round(cur_idx)) + cur_idx += frac_stride + all_steps += taken_steps + start_idx += size + return set(all_steps) + + +class SpacedDiffusion(GaussianDiffusion): + """ + A diffusion process which can skip steps in a base diffusion process. + + :param use_timesteps: a collection (sequence or set) of timesteps from the + original diffusion process to retain. + :param kwargs: the kwargs to create the base diffusion process. + """ + + def __init__(self, use_timesteps, **kwargs): + self.use_timesteps = set(use_timesteps) + self.timestep_map = [] + self.original_num_steps = len(kwargs["betas"]) + + base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa + last_alpha_cumprod = 1.0 + new_betas = [] + for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod): + if i in self.use_timesteps: + new_betas.append(1 - alpha_cumprod / last_alpha_cumprod) + last_alpha_cumprod = alpha_cumprod + self.timestep_map.append(i) + kwargs["betas"] = np.array(new_betas) + super().__init__(**kwargs) + + def p_mean_variance( + self, model, *args, **kwargs + ): # pylint: disable=signature-differs + return super().p_mean_variance(self._wrap_model(model), *args, **kwargs) + + def training_losses( + self, model, *args, **kwargs + ): # pylint: disable=signature-differs + return super().training_losses(self._wrap_model(model), *args, **kwargs) + + def condition_mean(self, cond_fn, *args, **kwargs): + return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs) + + def condition_score(self, cond_fn, *args, **kwargs): + return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs) + + def _wrap_model(self, model): + if isinstance(model, _WrappedModel): + return model + return _WrappedModel( + model, self.timestep_map, self.rescale_timesteps, self.original_num_steps + ) + + def _scale_timesteps(self, t): + # Scaling is done by the wrapped model. + return t + + +class _WrappedModel: + def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps): + self.model = model + self.timestep_map = timestep_map + self.rescale_timesteps = rescale_timesteps + self.original_num_steps = original_num_steps + + def __call__(self, x, ts, **kwargs): + map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype) + new_ts = map_tensor[ts] + if self.rescale_timesteps: + new_ts = new_ts.float() * (1000.0 / self.original_num_steps) + return self.model(x, new_ts, **kwargs) + + def __getattr__(self, name, default=None): + # this method is reached only if name is not in self.__dict__. 
+ return wrapped_getattr(self, name, default) diff --git a/motion_diffusion_model/model/BERT/BERT_encoder.py b/motion_diffusion_model/model/BERT/BERT_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..369608756c3c9b567a9e588ed2d2422c3b8dc478 --- /dev/null +++ b/motion_diffusion_model/model/BERT/BERT_encoder.py @@ -0,0 +1,32 @@ +import torch.nn as nn +import os + +def load_bert(model_path): + bert = BERT(model_path) + bert.eval() + bert.text_model.training = False + for p in bert.parameters(): + p.requires_grad = False + return bert + +class BERT(nn.Module): + def __init__(self, modelpath: str): + super().__init__() + + from transformers import AutoTokenizer, AutoModel + from transformers import logging + logging.set_verbosity_error() + # Tokenizer + os.environ["TOKENIZERS_PARALLELISM"] = "false" + # Tokenizer + self.tokenizer = AutoTokenizer.from_pretrained(modelpath) + # Text model + self.text_model = AutoModel.from_pretrained(modelpath) + + + def forward(self, texts): + encoded_inputs = self.tokenizer(texts, return_tensors="pt", padding=True) + output = self.text_model(**encoded_inputs.to(self.text_model.device)).last_hidden_state + mask = encoded_inputs.attention_mask.to(dtype=bool) + # output = output * mask.unsqueeze(-1) + return output, mask diff --git a/motion_diffusion_model/model/cfg_sampler.py b/motion_diffusion_model/model/cfg_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..6e02517ea9340ebc1789f565d8de5714cc92ca03 --- /dev/null +++ b/motion_diffusion_model/model/cfg_sampler.py @@ -0,0 +1,33 @@ +import numpy as np +import torch +import torch.nn as nn +from copy import deepcopy + +# A wrapper model for Classifier-free guidance **SAMPLING** only +# https://arxiv.org/abs/2207.12598 +class ClassifierFreeSampleModel(nn.Module): + + def __init__(self, model): + super().__init__() + self.model = model # model is the actual model to run + + assert self.model.cond_mask_prob > 0, 'Cannot run a guided diffusion on a model that has not been trained with no conditions' + + # pointers to inner model + self.rot2xyz = self.model.rot2xyz + self.translation = self.model.translation + self.njoints = self.model.njoints + self.nfeats = self.model.nfeats + self.data_rep = self.model.data_rep + self.cond_mode = self.model.cond_mode + self.encode_text = self.model.encode_text + + def forward(self, x, timesteps, y=None): + cond_mode = self.model.cond_mode + assert cond_mode in ['text', 'action'] + y_uncond = deepcopy(y) + y_uncond['uncond'] = True + out = self.model(x, timesteps, y) + out_uncond = self.model(x, timesteps, y_uncond) + return out_uncond + (y['scale'].view(-1, 1, 1, 1) * (out - out_uncond)) + diff --git a/motion_diffusion_model/model/mdm.py b/motion_diffusion_model/model/mdm.py new file mode 100644 index 0000000000000000000000000000000000000000..e227f652065317eef1d2107ba124d936d95d8e42 --- /dev/null +++ b/motion_diffusion_model/model/mdm.py @@ -0,0 +1,480 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import clip +from model.rotation2xyz import Rotation2xyz +from model.BERT.BERT_encoder import load_bert +from utils.misc import WeightedSum + + +class MDM(nn.Module): + def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot, + latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1, + ablation=None, activation="gelu", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512, + arch='trans_enc', emb_trans_dec=False, 
clip_version=None, **kargs): + super().__init__() + + self.legacy = legacy + self.modeltype = modeltype + self.njoints = njoints + self.nfeats = nfeats + self.num_actions = num_actions + self.data_rep = data_rep + self.dataset = dataset + + self.pose_rep = pose_rep + self.glob = glob + self.glob_rot = glob_rot + self.translation = translation + + self.latent_dim = latent_dim + + self.ff_size = ff_size + self.num_layers = num_layers + self.num_heads = num_heads + self.dropout = dropout + + self.ablation = ablation + self.activation = activation + self.clip_dim = clip_dim + self.action_emb = kargs.get('action_emb', None) + self.input_feats = self.njoints * self.nfeats + + self.normalize_output = kargs.get('normalize_encoder_output', False) + + self.cond_mode = kargs.get('cond_mode', 'no_cond') + self.cond_mask_prob = kargs.get('cond_mask_prob', 0.) + self.mask_frames = kargs.get('mask_frames', False) + self.arch = arch + self.gru_emb_dim = self.latent_dim if self.arch == 'gru' else 0 + self.input_process = InputProcess(self.data_rep, self.input_feats+self.gru_emb_dim, self.latent_dim) + + self.emb_policy = kargs.get('emb_policy', 'add') + + self.sequence_pos_encoder = PositionalEncoding(self.latent_dim, self.dropout, max_len=kargs.get('pos_embed_max_len', 5000)) + self.emb_trans_dec = emb_trans_dec + + self.pred_len = kargs.get('pred_len', 0) + self.context_len = kargs.get('context_len', 0) + self.total_len = self.pred_len + self.context_len + self.is_prefix_comp = self.total_len > 0 + self.all_goal_joint_names = kargs.get('all_goal_joint_names', []) + + self.multi_target_cond = kargs.get('multi_target_cond', False) + self.multi_encoder_type = kargs.get('multi_encoder_type', 'multi') + self.target_enc_layers = kargs.get('target_enc_layers', 1) + if self.multi_target_cond: + if self.multi_encoder_type == 'multi': + self.embed_target_cond = EmbedTargetLocMulti(self.all_goal_joint_names, self.latent_dim) + elif self.multi_encoder_type == 'single': + self.embed_target_cond = EmbedTargetLocSingle(self.all_goal_joint_names, self.latent_dim, self.target_enc_layers) + elif self.multi_encoder_type == 'split': + self.embed_target_cond = EmbedTargetLocSplit(self.all_goal_joint_names, self.latent_dim, self.target_enc_layers) + + if self.arch == 'trans_enc': + print("TRANS_ENC init") + seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim, + nhead=self.num_heads, + dim_feedforward=self.ff_size, + dropout=self.dropout, + activation=self.activation) + + self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer, + num_layers=self.num_layers) + elif self.arch == 'trans_dec': + print("TRANS_DEC init") + seqTransDecoderLayer = nn.TransformerDecoderLayer(d_model=self.latent_dim, + nhead=self.num_heads, + dim_feedforward=self.ff_size, + dropout=self.dropout, + activation=activation) + self.seqTransDecoder = nn.TransformerDecoder(seqTransDecoderLayer, + num_layers=self.num_layers) + elif self.arch == 'gru': + print("GRU init") + self.gru = nn.GRU(self.latent_dim, self.latent_dim, num_layers=self.num_layers, batch_first=True) + else: + raise ValueError('Please choose correct architecture [trans_enc, trans_dec, gru]') + + self.embed_timestep = TimestepEmbedder(self.latent_dim, self.sequence_pos_encoder) + + if self.cond_mode != 'no_cond': + if 'text' in self.cond_mode: + # We support CLIP encoder and DistilBERT + print('EMBED TEXT') + + self.text_encoder_type = kargs.get('text_encoder_type', 'clip') + + if self.text_encoder_type == "clip": + print('Loading CLIP...') + self.clip_version = 
clip_version + self.clip_model = self.load_and_freeze_clip(clip_version) + self.encode_text = self.clip_encode_text + elif self.text_encoder_type == 'bert': + assert self.arch == 'trans_dec' + # assert self.emb_trans_dec == False # passing just the time embed so it's fine + print("Loading BERT...") + # bert_model_path = 'model/BERT/distilbert-base-uncased' + bert_model_path = 'distilbert/distilbert-base-uncased' + self.clip_model = load_bert(bert_model_path) # Sorry for that, the naming is for backward compatibility + self.encode_text = self.bert_encode_text + self.clip_dim = 768 + else: + raise ValueError('We only support [CLIP, BERT] text encoders') + + self.embed_text = nn.Linear(self.clip_dim, self.latent_dim) + + if 'action' in self.cond_mode: + self.embed_action = EmbedAction(self.num_actions, self.latent_dim) + print('EMBED ACTION') + + self.output_process = OutputProcess(self.data_rep, self.input_feats, self.latent_dim, self.njoints, + self.nfeats) + + self.rot2xyz = Rotation2xyz(device='cpu', dataset=self.dataset) + + def parameters_wo_clip(self): + return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')] + + def load_and_freeze_clip(self, clip_version): + clip_model, clip_preprocess = clip.load(clip_version, device='cpu', + jit=False) # Must set jit=False for training + clip.model.convert_weights( + clip_model) # Actually this line is unnecessary since clip by default already on float16 + + # Freeze CLIP weights + clip_model.eval() + for p in clip_model.parameters(): + p.requires_grad = False + + return clip_model + + def mask_cond(self, cond, force_mask=False): + bs = cond.shape[-2] + if force_mask: + return torch.zeros_like(cond) + elif self.training and self.cond_mask_prob > 0.: + mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_mask_prob).view(1, bs, 1) # 1-> use null_cond, 0-> use real cond + return cond * (1. 
- mask)
+        else:
+            return cond
+
+    def clip_encode_text(self, raw_text):
+        # raw_text - list (batch_size length) of strings with input text prompts
+        device = next(self.parameters()).device
+        max_text_len = 20 if self.dataset in ['humanml', 'kit'] else None  # Specific hardcoding for humanml dataset
+        if max_text_len is not None:
+            default_context_length = 77
+            context_length = max_text_len + 2  # start_token + 20 + end_token
+            assert context_length < default_context_length
+            texts = clip.tokenize(raw_text, context_length=context_length, truncate=True).to(device)  # [bs, context_length] # if n_tokens > context_length -> will truncate
+            # print('texts', texts.shape)
+            zero_pad = torch.zeros([texts.shape[0], default_context_length-context_length], dtype=texts.dtype, device=texts.device)
+            texts = torch.cat([texts, zero_pad], dim=1)
+            # print('texts after pad', texts.shape, texts)
+        else:
+            texts = clip.tokenize(raw_text, truncate=True).to(device)  # [bs, context_length] # if n_tokens > 77 -> will truncate
+        return self.clip_model.encode_text(texts).float().unsqueeze(0)
+
+    def bert_encode_text(self, raw_text):
+        # enc_text = self.clip_model(raw_text)
+        # enc_text = enc_text.permute(1, 0, 2)
+        # return enc_text
+        enc_text, mask = self.clip_model(raw_text)  # self.clip_model.get_last_hidden_state(raw_text, return_mask=True)  # mask: False means no token there
+        enc_text = enc_text.permute(1, 0, 2)
+        mask = ~mask  # mask: True means no token there, we invert since the meaning of mask for transformer is inverted https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html
+        return enc_text, mask
+
+    def forward(self, x, timesteps, y=None):
+        """
+        x: [batch_size, njoints, nfeats, max_frames], denoted x_t in the paper
+        timesteps: [batch_size] (int)
+        """
+        bs, njoints, nfeats, nframes = x.shape
+        time_emb = self.embed_timestep(timesteps)  # [1, bs, d]
+
+        if 'target_cond' in y.keys():
+            # NOTE: We don't use CFG for joints - but we do want to support uncond sampling for generation and eval!
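+            # Illustrative note, a sketch of the mechanism rather than extra functionality:
+            # `mask_cond` implements the condition dropout behind classifier-free guidance.
+            # During training, each sample's condition embedding is zeroed with probability
+            # `cond_mask_prob`, so the model also learns an unconditional prediction.
+            # At sampling time, ClassifierFreeSampleModel (model/cfg_sampler.py) runs the
+            # model twice and blends the two outputs:
+            #     out = out_uncond + y['scale'] * (out_cond - out_uncond)
+            # where scale == 1 recovers the purely conditional output and scale == 0 the
+            # unconditional one. Here the same masking is applied to the target-conditioning
+            # embedding so that unconditional sampling remains possible.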
+ time_emb += self.mask_cond(self.embed_target_cond(y['target_cond'], y['target_joint_names'], y['is_heading'])[None], force_mask=y.get('target_uncond', False)) # For uncond support and CFG + # time_emb += self.embed_target_cond(y['target_cond'], y['target_joint_names'], y['is_heading'])[None] + + # Build input for prefix completion + if self.is_prefix_comp: + x = torch.cat([y['prefix'], x], dim=-1) + y['mask'] = torch.cat([torch.ones([bs, 1, 1, self.context_len], dtype=y['mask'].dtype, device=y['mask'].device), + y['mask']], dim=-1) + + force_mask = y.get('uncond', False) + if 'text' in self.cond_mode: + if 'text_embed' in y.keys(): # caching option + enc_text = y['text_embed'] + else: + enc_text = self.encode_text(y['text']) + if type(enc_text) == tuple: + enc_text, text_mask = enc_text + if text_mask.shape[0] == 1 and bs > 1: # casting mask for the single-prompt-for-all case + text_mask = torch.repeat_interleave(text_mask, bs, dim=0) + text_emb = self.embed_text(self.mask_cond(enc_text, force_mask=force_mask)) # casting mask for the single-prompt-for-all case + if self.emb_policy == 'add': + emb = text_emb + time_emb + else: + emb = torch.cat([time_emb, text_emb], dim=0) + text_mask = torch.cat([torch.zeros_like(text_mask[:, 0:1]), text_mask], dim=1) + if 'action' in self.cond_mode: + action_emb = self.embed_action(y['action']) + emb = time_emb + self.mask_cond(action_emb, force_mask=force_mask) + if self.cond_mode == 'no_cond': + # unconstrained + emb = time_emb + + if self.arch == 'gru': + x_reshaped = x.reshape(bs, njoints*nfeats, 1, nframes) + emb_gru = emb.repeat(nframes, 1, 1) #[#frames, bs, d] + emb_gru = emb_gru.permute(1, 2, 0) #[bs, d, #frames] + emb_gru = emb_gru.reshape(bs, self.latent_dim, 1, nframes) #[bs, d, 1, #frames] + x = torch.cat((x_reshaped, emb_gru), axis=1) #[bs, d+joints*feat, 1, #frames] + + x = self.input_process(x) + + # TODO - move to collate + frames_mask = None + is_valid_mask = y['mask'].shape[-1] > 1 # Don't use mask with the generate script + if self.mask_frames and is_valid_mask: + frames_mask = torch.logical_not(y['mask'][..., :x.shape[0]].squeeze(1).squeeze(1)).to(device=x.device) + if self.emb_trans_dec or self.arch == 'trans_enc': + step_mask = torch.zeros((bs, 1), dtype=torch.bool, device=x.device) + frames_mask = torch.cat([step_mask, frames_mask], dim=1) + + if self.arch == 'trans_enc': + # adding the timestep embed + xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d] + xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d] + output = self.seqTransEncoder(xseq, src_key_padding_mask=frames_mask)[1:] # , src_key_padding_mask=~maskseq) # [seqlen, bs, d] + + elif self.arch == 'trans_dec': + if self.emb_trans_dec: + xseq = torch.cat((time_emb, x), axis=0) + else: + xseq = x + xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d] + + if self.text_encoder_type == 'clip': + output = self.seqTransDecoder(tgt=xseq, memory=emb, tgt_key_padding_mask=frames_mask) + elif self.text_encoder_type == 'bert': + output = self.seqTransDecoder(tgt=xseq, memory=emb, memory_key_padding_mask=text_mask, tgt_key_padding_mask=frames_mask) # Rotem's bug fix + else: + raise ValueError() + + if self.emb_trans_dec: + output = output[1:] # [seqlen, bs, d] + + elif self.arch == 'gru': + xseq = x + xseq = self.sequence_pos_encoder(xseq) # [seqlen, bs, d] + output, _ = self.gru(xseq) + + # Extract completed suffix + if self.is_prefix_comp: + output = output[self.context_len:] + y['mask'] = y['mask'][..., self.context_len:] + + output = self.output_process(output) # 
[bs, njoints, nfeats, nframes] + return output + + + def _apply(self, fn): + super()._apply(fn) + self.rot2xyz.smpl_model._apply(fn) + + + def train(self, *args, **kwargs): + super().train(*args, **kwargs) + self.rot2xyz.smpl_model.train(*args, **kwargs) + + +class PositionalEncoding(nn.Module): + def __init__(self, d_model, dropout=0.1, max_len=5000): + super(PositionalEncoding, self).__init__() + self.dropout = nn.Dropout(p=dropout) + + pe = torch.zeros(max_len, d_model) + position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) + div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0).transpose(0, 1) + + self.register_buffer('pe', pe) + + def forward(self, x): + # not used in the final model + x = x + self.pe[:x.shape[0], :] + return self.dropout(x) + + +class TimestepEmbedder(nn.Module): + def __init__(self, latent_dim, sequence_pos_encoder): + super().__init__() + self.latent_dim = latent_dim + self.sequence_pos_encoder = sequence_pos_encoder + + time_embed_dim = self.latent_dim + self.time_embed = nn.Sequential( + nn.Linear(self.latent_dim, time_embed_dim), + nn.SiLU(), + nn.Linear(time_embed_dim, time_embed_dim), + ) + + def forward(self, timesteps): + return self.time_embed(self.sequence_pos_encoder.pe[timesteps]).permute(1, 0, 2) + + +class InputProcess(nn.Module): + def __init__(self, data_rep, input_feats, latent_dim): + super().__init__() + self.data_rep = data_rep + self.input_feats = input_feats + self.latent_dim = latent_dim + self.poseEmbedding = nn.Linear(self.input_feats, self.latent_dim) + if self.data_rep == 'rot_vel': + self.velEmbedding = nn.Linear(self.input_feats, self.latent_dim) + + def forward(self, x): + bs, njoints, nfeats, nframes = x.shape + x = x.permute((3, 0, 1, 2)).reshape(nframes, bs, njoints*nfeats) + + if self.data_rep in ['rot6d', 'xyz', 'hml_vec']: + x = self.poseEmbedding(x) # [seqlen, bs, d] + return x + elif self.data_rep == 'rot_vel': + first_pose = x[[0]] # [1, bs, 150] + first_pose = self.poseEmbedding(first_pose) # [1, bs, d] + vel = x[1:] # [seqlen-1, bs, 150] + vel = self.velEmbedding(vel) # [seqlen-1, bs, d] + return torch.cat((first_pose, vel), axis=0) # [seqlen, bs, d] + else: + raise ValueError + + +class OutputProcess(nn.Module): + def __init__(self, data_rep, input_feats, latent_dim, njoints, nfeats): + super().__init__() + self.data_rep = data_rep + self.input_feats = input_feats + self.latent_dim = latent_dim + self.njoints = njoints + self.nfeats = nfeats + self.poseFinal = nn.Linear(self.latent_dim, self.input_feats) + if self.data_rep == 'rot_vel': + self.velFinal = nn.Linear(self.latent_dim, self.input_feats) + + def forward(self, output): + nframes, bs, d = output.shape + if self.data_rep in ['rot6d', 'xyz', 'hml_vec']: + output = self.poseFinal(output) # [seqlen, bs, 150] + elif self.data_rep == 'rot_vel': + first_pose = output[[0]] # [1, bs, d] + first_pose = self.poseFinal(first_pose) # [1, bs, 150] + vel = output[1:] # [seqlen-1, bs, d] + vel = self.velFinal(vel) # [seqlen-1, bs, 150] + output = torch.cat((first_pose, vel), axis=0) # [seqlen, bs, 150] + else: + raise ValueError + output = output.reshape(nframes, bs, self.njoints, self.nfeats) + output = output.permute(1, 2, 3, 0) # [bs, njoints, nfeats, nframes] + return output + + +class EmbedAction(nn.Module): + def __init__(self, num_actions, latent_dim): + super().__init__() + self.action_embedding = 
nn.Parameter(torch.randn(num_actions, latent_dim)) + + def forward(self, input): + idx = input[:, 0].to(torch.long) # an index array must be long + output = self.action_embedding[idx] + return output + +class EmbedTargetLocSingle(nn.Module): + def __init__(self, all_goal_joint_names, latent_dim, num_layers=1): + super().__init__() + self.extended_goal_joint_names = all_goal_joint_names + ['traj', 'heading'] + self.target_cond_dim = len(self.extended_goal_joint_names) * 4 # 4 => (x,y,z,is_valid) + self.latent_dim = latent_dim + _layers = [nn.Linear(self.target_cond_dim, self.latent_dim)] + for _ in range(num_layers): + _layers += [nn.SiLU(), nn.Linear(self.latent_dim, self.latent_dim)] + self.mlp = nn.Sequential(*_layers) + + def forward(self, input, target_joint_names, target_heading): + # TODO - generate validity from outside the model + validity = torch.zeros_like(input)[..., :1] + for sample_idx, sample_joint_names in enumerate(target_joint_names): + sample_joint_names_w_heading = np.append(sample_joint_names, 'heading') if target_heading[sample_idx] else sample_joint_names + for j in sample_joint_names_w_heading: + validity[sample_idx, self.extended_goal_joint_names.index(j)] = 1. + + mlp_input = torch.cat([input, validity], dim=-1).view(input.shape[0], -1) + return self.mlp(mlp_input) + + +class EmbedTargetLocSplit(nn.Module): + def __init__(self, all_goal_joint_names, latent_dim, num_layers=1): + super().__init__() + self.extended_goal_joint_names = all_goal_joint_names + ['traj', 'heading'] + self.target_cond_dim = 4 + self.latent_dim = latent_dim + self.splited_dim = self.latent_dim // len(self.extended_goal_joint_names) + assert self.latent_dim % len(self.extended_goal_joint_names) == 0 + self.mini_mlps = nn.ModuleList() + for _ in self.extended_goal_joint_names: + _layers = [nn.Linear(self.target_cond_dim, self.splited_dim)] + for _ in range(num_layers): + _layers += [nn.SiLU(), nn.Linear(self.splited_dim, self.splited_dim)] + self.mini_mlps.append(nn.Sequential(*_layers)) + + def forward(self, input, target_joint_names, target_heading): + # TODO - generate validity from outside the model + validity = torch.zeros_like(input)[..., :1] + for sample_idx, sample_joint_names in enumerate(target_joint_names): + sample_joint_names_w_heading = np.append(sample_joint_names, 'heading') if target_heading[sample_idx] else sample_joint_names + for j in sample_joint_names_w_heading: + validity[sample_idx, self.extended_goal_joint_names.index(j)] = 1. 
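+        # Shape sketch (inferred from the layer sizes above; these shapes are not asserted in the code):
+        #   input:    [bs, len(extended_goal_joint_names), 3]  one (x, y, z) slot per goal joint plus 'traj' and 'heading'
+        #   validity: [bs, len(extended_goal_joint_names), 1]  set to 1. only for the joints in target_joint_names
+        #             (plus 'heading' when target_heading is set for that sample)
+        #   cat(input, validity) -> [bs, len(extended_goal_joint_names), 4]; each per-joint 4-vector goes through its
+        #   own mini-MLP (splited_dim = latent_dim // len(extended_goal_joint_names) outputs) and the pieces are
+        #   concatenated back into a latent_dim-sized conditioning vector.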
+ + mlp_input = torch.cat([input, validity], dim=-1) + mlp_splits = [self.mini_mlps[i](mlp_input[:, i]) for i in range(mlp_input.shape[1])] + return torch.cat(mlp_splits, dim=-1) + +class EmbedTargetLocMulti(nn.Module): + def __init__(self, all_goal_joint_names, latent_dim): + super().__init__() + + # todo: use a tensor of weight per joint, and another one for biases, then apply a selection in one go like we to for actions + self.extended_goal_joint_names = all_goal_joint_names + ['traj', 'heading'] + self.extended_goal_joint_idx = {joint_name: idx for idx, joint_name in enumerate(self.extended_goal_joint_names)} + self.n_extended_goal_joints = len(self.extended_goal_joint_names) + self.target_loc_emb = nn.ParameterDict({joint_name: + nn.Sequential( + nn.Linear(3, latent_dim), + nn.SiLU(), + nn.Linear(latent_dim, latent_dim)) + for joint_name in self.extended_goal_joint_names}) # todo: check if 3 works for heading and traj + # nn.Linear(3, latent_dim) for joint_name in self.extended_goal_joint_names}) # todo: check if 3 works for heading and traj + self.target_all_loc_emb = WeightedSum(self.n_extended_goal_joints) # nn.Linear(self.n_extended_goal_joints, latent_dim) + self.latent_dim = latent_dim + + def forward(self, input, target_joint_names, target_heading): + output = torch.zeros((input.shape[0], self.latent_dim), dtype=input.dtype, device=input.device) + + # Iterate over the batch and apply the appropriate filter for each joint + for sample_idx, sample_joint_names in enumerate(target_joint_names): + sample_joint_names_w_heading = np.append(sample_joint_names, 'heading') if target_heading[sample_idx] else sample_joint_names + output_one_sample = torch.zeros((self.n_extended_goal_joints, self.latent_dim), dtype=input.dtype, device=input.device) + for joint_name in sample_joint_names_w_heading: + layer = self.target_loc_emb[joint_name] + output_one_sample[self.extended_goal_joint_idx[joint_name]] = layer(input[sample_idx, self.extended_goal_joint_idx[joint_name]]) + output[sample_idx] = self.target_all_loc_emb(output_one_sample) + # print(torch.where(output_one_sample.sum(axis=1)!=0)[0].cpu().numpy()) + + return output diff --git a/motion_diffusion_model/model/rotation2xyz.py b/motion_diffusion_model/model/rotation2xyz.py new file mode 100644 index 0000000000000000000000000000000000000000..9746c7d73f2e30bfb2495cb901f8422f04ecbf5b --- /dev/null +++ b/motion_diffusion_model/model/rotation2xyz.py @@ -0,0 +1,92 @@ +# This code is based on https://github.com/Mathux/ACTOR.git +import torch +import utils.rotation_conversions as geometry + + +from model.smpl import SMPL, JOINTSTYPE_ROOT +# from .get_model import JOINTSTYPES +JOINTSTYPES = ["a2m", "a2mpl", "smpl", "vibe", "vertices"] + + +class Rotation2xyz: + def __init__(self, device, dataset='amass'): + self.device = device + self.dataset = dataset + self.smpl_model = SMPL().eval().to(device) + + def __call__(self, x, mask, pose_rep, translation, glob, + jointstype, vertstrans, betas=None, beta=0, + glob_rot=None, get_rotations_back=False, **kwargs): + if pose_rep == "xyz": + return x + + if mask is None: + mask = torch.ones((x.shape[0], x.shape[-1]), dtype=bool, device=x.device) + + if not glob and glob_rot is None: + raise TypeError("You must specify global rotation if glob is False") + + if jointstype not in JOINTSTYPES: + raise NotImplementedError("This jointstype is not implemented.") + + if translation: + x_translations = x[:, -1, :3] + x_rotations = x[:, :-1] + else: + x_rotations = x + + x_rotations = x_rotations.permute(0, 3, 1, 2) + 
nsamples, time, njoints, feats = x_rotations.shape + + # Compute rotations (convert only masked sequences output) + if pose_rep == "rotvec": + rotations = geometry.axis_angle_to_matrix(x_rotations[mask]) + elif pose_rep == "rotmat": + rotations = x_rotations[mask].view(-1, njoints, 3, 3) + elif pose_rep == "rotquat": + rotations = geometry.quaternion_to_matrix(x_rotations[mask]) + elif pose_rep == "rot6d": + rotations = geometry.rotation_6d_to_matrix(x_rotations[mask]) + else: + raise NotImplementedError("No geometry for this one.") + + if not glob: + global_orient = torch.tensor(glob_rot, device=x.device) + global_orient = geometry.axis_angle_to_matrix(global_orient).view(1, 1, 3, 3) + global_orient = global_orient.repeat(len(rotations), 1, 1, 1) + else: + global_orient = rotations[:, 0] + rotations = rotations[:, 1:] + + if betas is None: + betas = torch.zeros([rotations.shape[0], self.smpl_model.num_betas], + dtype=rotations.dtype, device=rotations.device) + betas[:, 1] = beta + # import ipdb; ipdb.set_trace() + out = self.smpl_model(body_pose=rotations, global_orient=global_orient, betas=betas) + + # get the desirable joints + joints = out[jointstype] + + x_xyz = torch.empty(nsamples, time, joints.shape[1], 3, device=x.device, dtype=x.dtype) + x_xyz[~mask] = 0 + x_xyz[mask] = joints + + x_xyz = x_xyz.permute(0, 2, 3, 1).contiguous() + + # the first translation root at the origin on the prediction + if jointstype != "vertices": + rootindex = JOINTSTYPE_ROOT[jointstype] + x_xyz = x_xyz - x_xyz[:, [rootindex], :, :] + + if translation and vertstrans: + # the first translation root at the origin + x_translations = x_translations - x_translations[:, :, [0]] + + # add the translation to all the joints + x_xyz = x_xyz + x_translations[:, None, :, :] + + if get_rotations_back: + return x_xyz, rotations, global_orient + else: + return x_xyz diff --git a/motion_diffusion_model/model/smpl.py b/motion_diffusion_model/model/smpl.py new file mode 100644 index 0000000000000000000000000000000000000000..587f5419601a74df92c1e37263b28d4aa6a7c0a9 --- /dev/null +++ b/motion_diffusion_model/model/smpl.py @@ -0,0 +1,97 @@ +# This code is based on https://github.com/Mathux/ACTOR.git +import numpy as np +import torch + +import contextlib + +from smplx import SMPLLayer as _SMPLLayer +from smplx.lbs import vertices2joints + + +# action2motion_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 21, 24, 38] +# change 0 and 8 +action2motion_joints = [8, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 12, 13, 14, 21, 24, 38] + +from utils.config import SMPL_MODEL_PATH, JOINT_REGRESSOR_TRAIN_EXTRA + +JOINTSTYPE_ROOT = {"a2m": 0, # action2motion + "smpl": 0, + "a2mpl": 0, # set(smpl, a2m) + "vibe": 8} # 0 is the 8 position: OP MidHip below + +JOINT_MAP = { + 'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17, + 'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16, + 'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0, + 'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8, + 'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7, + 'OP REye': 25, 'OP LEye': 26, 'OP REar': 27, + 'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30, + 'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34, + 'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45, + 'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7, + 'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17, + 'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20, + 'Neck (LSP)': 47, 'Top of Head (LSP)': 48, + 'Pelvis (MPII)': 49, 'Thorax (MPII)': 50, + 'Spine (H36M)': 51, 'Jaw (H36M)': 
52, + 'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26, + 'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27 +} + +JOINT_NAMES = [ + 'OP Nose', 'OP Neck', 'OP RShoulder', + 'OP RElbow', 'OP RWrist', 'OP LShoulder', + 'OP LElbow', 'OP LWrist', 'OP MidHip', + 'OP RHip', 'OP RKnee', 'OP RAnkle', + 'OP LHip', 'OP LKnee', 'OP LAnkle', + 'OP REye', 'OP LEye', 'OP REar', + 'OP LEar', 'OP LBigToe', 'OP LSmallToe', + 'OP LHeel', 'OP RBigToe', 'OP RSmallToe', 'OP RHeel', + 'Right Ankle', 'Right Knee', 'Right Hip', + 'Left Hip', 'Left Knee', 'Left Ankle', + 'Right Wrist', 'Right Elbow', 'Right Shoulder', + 'Left Shoulder', 'Left Elbow', 'Left Wrist', + 'Neck (LSP)', 'Top of Head (LSP)', + 'Pelvis (MPII)', 'Thorax (MPII)', + 'Spine (H36M)', 'Jaw (H36M)', + 'Head (H36M)', 'Nose', 'Left Eye', + 'Right Eye', 'Left Ear', 'Right Ear' +] + + +# adapted from VIBE/SPIN to output smpl_joints, vibe joints and action2motion joints +class SMPL(_SMPLLayer): + """ Extension of the official SMPL implementation to support more joints """ + + def __init__(self, model_path=SMPL_MODEL_PATH, **kwargs): + kwargs["model_path"] = model_path + + # remove the verbosity for the 10-shapes beta parameters + with contextlib.redirect_stdout(None): + super(SMPL, self).__init__(**kwargs) + + J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA) + self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32)) + vibe_indexes = np.array([JOINT_MAP[i] for i in JOINT_NAMES]) + a2m_indexes = vibe_indexes[action2motion_joints] + smpl_indexes = np.arange(24) + a2mpl_indexes = np.unique(np.r_[smpl_indexes, a2m_indexes]) + + self.maps = {"vibe": vibe_indexes, + "a2m": a2m_indexes, + "smpl": smpl_indexes, + "a2mpl": a2mpl_indexes} + + def forward(self, *args, **kwargs): + smpl_output = super(SMPL, self).forward(*args, **kwargs) + + extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices) + all_joints = torch.cat([smpl_output.joints, extra_joints], dim=1) + + output = {"vertices": smpl_output.vertices} + + for joinstype, indexes in self.maps.items(): + output[joinstype] = all_joints[:, indexes] + + return output \ No newline at end of file diff --git a/motion_diffusion_model/sample/edit.py b/motion_diffusion_model/sample/edit.py new file mode 100644 index 0000000000000000000000000000000000000000..1b1b127ddd91ab8d86bc02e7340b3e3273047be8 --- /dev/null +++ b/motion_diffusion_model/sample/edit.py @@ -0,0 +1,212 @@ +# This code is based on https://github.com/openai/guided-diffusion +""" +Generate a large batch of image samples from a model and save them as a large +numpy array. This can be used to produce samples for FID evaluation. 
+""" +from utils.fixseed import fixseed +import os +import numpy as np +import torch +from utils.parser_util import edit_args +from sample.generate import save_multiple_samples, construct_template_variables +from utils.model_util import create_model_and_diffusion, load_saved_model +from utils import dist_util +from utils.sampler_util import ClassifierFreeSampleModel +from data_loaders.get_data import get_dataset_loader +from data_loaders.humanml.scripts.motion_process import recover_from_ric +from data_loaders import humanml_utils +import data_loaders.humanml.utils.paramUtil as paramUtil +from data_loaders.humanml.utils.plot_script import plot_3d_motion +import shutil + + +def main(): + args = edit_args() + fixseed(args.seed) + out_path = args.output_dir + name = os.path.basename(os.path.dirname(args.model_path)) + niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') + max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 + fps = 12.5 if args.dataset == 'kit' else 20 + n_frames = 120 # min(max_frames, int(args.motion_length*fps)) + + dist_util.setup_dist(args.device) + if out_path == '': + out_path = os.path.join(os.path.dirname(args.model_path), + 'edit_{}_{}_{}_seed{}'.format(name, niter, args.edit_mode, args.seed)) + if args.text_condition != '': + out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '') + + print('Loading dataset...') + assert args.num_samples <= args.batch_size, \ + f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})' + # So why do we need this check? In order to protect GPU from a memory overload in the following line. + # If your GPU can handle batch size larger then default, you can specify it through --batch_size flag. + # If it doesn't, and you still want to sample more prompts, run this script with different seeds + # (specify through the --seed flag) + args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples + data = get_dataset_loader(name=args.dataset, + batch_size=args.batch_size, + num_frames=max_frames, + split='test', + hml_mode='train') # in train mode, you get both text and motion. + # data.fixed_length = n_frames + total_num_samples = args.num_samples * args.num_repetitions + + print("Creating model and diffusion...") + model, diffusion = create_model_and_diffusion(args, data) + + print(f"Loading checkpoints from [{args.model_path}]...") + load_saved_model(model, args.model_path, use_avg=args.use_ema) + + model = ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler + model.to(dist_util.dev()) + model.eval() # disable random masking + + iterator = iter(data) + input_motions, model_kwargs = next(iterator) + input_motions = input_motions.to(dist_util.dev()) + texts = [args.text_condition] * args.num_samples + model_kwargs['y']['text'] = texts + if args.text_condition == '': + args.guidance_param = 0. 
# Force unconditioned generation + + # add inpainting mask according to args + assert max_frames == input_motions.shape[-1] + gt_frames_per_sample = {} + model_kwargs['y']['inpainted_motion'] = input_motions + if args.edit_mode == 'in_between': + model_kwargs['y']['inpainting_mask'] = torch.ones_like(input_motions, dtype=torch.bool, + device=input_motions.device) # True means use gt motion + for i, length in enumerate(model_kwargs['y']['lengths'].cpu().numpy()): + start_idx, end_idx = int(args.prefix_end * length), int(args.suffix_start * length) + gt_frames_per_sample[i] = list(range(0, start_idx)) + list(range(end_idx, max_frames)) + model_kwargs['y']['inpainting_mask'][i, :, :, + start_idx: end_idx] = False # do inpainting in those frames + elif args.edit_mode == 'upper_body': + model_kwargs['y']['inpainting_mask'] = torch.tensor(humanml_utils.HML_LOWER_BODY_MASK, dtype=torch.bool, + device=input_motions.device) # True is lower body data + model_kwargs['y']['inpainting_mask'] = model_kwargs['y']['inpainting_mask'].unsqueeze(0).unsqueeze( + -1).unsqueeze(-1).repeat(input_motions.shape[0], 1, input_motions.shape[2], input_motions.shape[3]) + + all_motions = [] + all_lengths = [] + all_text = [] + + for rep_i in range(args.num_repetitions): + print(f'### Start sampling [repetitions #{rep_i}]') + + # add CFG scale to batch + model_kwargs['y']['scale'] = torch.ones(args.batch_size, device=dist_util.dev()) * args.guidance_param + + sample_fn = diffusion.p_sample_loop + + sample = sample_fn( + model, + (args.batch_size, model.njoints, model.nfeats, max_frames), + clip_denoised=False, + model_kwargs=model_kwargs, + skip_timesteps=0, # 0 is the default value - i.e. don't skip any step + init_image=None, + progress=True, + dump_steps=None, + noise=None, + const_noise=False, + ) + + + # Recover XYZ *positions* from HumanML3D vector representation + if model.data_rep == 'hml_vec': + n_joints = 22 if sample.shape[1] == 263 else 21 + sample = data.dataset.t2m_dataset.inv_transform(sample.cpu().permute(0, 2, 3, 1)).float() + sample = recover_from_ric(sample, n_joints) + sample = sample.view(-1, *sample.shape[2:]).permute(0, 2, 3, 1) + + all_text += model_kwargs['y']['text'] + all_motions.append(sample.cpu().numpy()) + all_lengths.append(model_kwargs['y']['lengths'].cpu().numpy()) + + print(f"created {len(all_motions) * args.batch_size} samples") + + + all_motions = np.concatenate(all_motions, axis=0) + all_motions = all_motions[:total_num_samples] # [bs, njoints, 6, seqlen] + all_text = all_text[:total_num_samples] + all_lengths = np.concatenate(all_lengths, axis=0)[:total_num_samples] + + if os.path.exists(out_path): + shutil.rmtree(out_path) + os.makedirs(out_path) + + npy_path = os.path.join(out_path, 'results.npy') + print(f"saving results file to [{npy_path}]") + np.save(npy_path, + {'motion': all_motions, 'text': all_text, 'lengths': all_lengths, + 'num_samples': args.num_samples, 'num_repetitions': args.num_repetitions}) + with open(npy_path.replace('.npy', '.txt'), 'w') as fw: + fw.write('\n'.join(all_text)) + with open(npy_path.replace('.npy', '_len.txt'), 'w') as fw: + fw.write('\n'.join([str(l) for l in all_lengths])) + + print(f"saving visualizations to [{out_path}]...") + skeleton = paramUtil.kit_kinematic_chain if args.dataset == 'kit' else paramUtil.t2m_kinematic_chain + + # Recover XYZ *positions* from HumanML3D vector representation + if model.data_rep == 'hml_vec': + input_motions = data.dataset.t2m_dataset.inv_transform(input_motions.cpu().permute(0, 2, 3, 1)).float() + 
input_motions = recover_from_ric(input_motions, n_joints) + input_motions = input_motions.view(-1, *input_motions.shape[2:]).permute(0, 2, 3, 1).cpu().numpy() + + + sample_print_template, row_print_template, all_print_template, \ + sample_file_template, row_file_template, all_file_template = construct_template_variables(args.unconstrained) + max_vis_samples = 6 + num_vis_samples = min(args.num_samples, max_vis_samples) + animations = np.empty(shape=(args.num_samples, args.num_repetitions), dtype=object) + max_length = max(all_lengths) + + for sample_i in range(args.num_samples): + caption = 'Input Motion' + length = model_kwargs['y']['lengths'][sample_i] + motion = input_motions[sample_i].transpose(2, 0, 1)[:length] + save_file = 'input_motion{:02d}.mp4'.format(sample_i) + animation_save_path = os.path.join(out_path, save_file) + rep_files = [animation_save_path] + # FIXME - fix and bring back the following: + # print(f'[({sample_i}) "{caption}" | -> {save_file}]') + # plot_3d_motion(animation_save_path, skeleton, motion, title=caption, + # dataset=args.dataset, fps=fps, vis_mode='gt', + # gt_frames=gt_frames_per_sample.get(sample_i, [])) + for rep_i in range(args.num_repetitions): + caption = all_text[rep_i*args.batch_size + sample_i] + if caption == '': + caption = 'Edit [{}] unconditioned'.format(args.edit_mode) + else: + caption = 'Edit [{}]: {}'.format(args.edit_mode, caption) + length = all_lengths[rep_i*args.batch_size + sample_i] + motion = all_motions[rep_i*args.batch_size + sample_i].transpose(2, 0, 1)[:length] + save_file = 'sample{:02d}_rep{:02d}.mp4'.format(sample_i, rep_i) + animation_save_path = os.path.join(out_path, save_file) + rep_files.append(animation_save_path) + gt_frames = gt_frames_per_sample.get(sample_i, []) + print(f'[({sample_i}) "{caption}" | Rep #{rep_i} | -> {save_file}]') + animations[sample_i, rep_i] = plot_3d_motion(animation_save_path, + skeleton, motion, dataset=args.dataset, title=caption, + fps=fps, gt_frames=gt_frames) + # Credit for visualization: https://github.com/EricGuo5513/text-to-motion + + all_rep_save_file = os.path.join(out_path, 'sample{:02d}.mp4'.format(sample_i)) + ffmpeg_rep_files = [f' -i {f} ' for f in rep_files] + hstack_args = f' -filter_complex hstack=inputs={args.num_repetitions+1}' + ffmpeg_rep_cmd = f'ffmpeg -y -loglevel warning ' + ''.join(ffmpeg_rep_files) + f'{hstack_args} {all_rep_save_file}' + os.system(ffmpeg_rep_cmd) + print(f'[({sample_i}) "{caption}" | all repetitions | -> {all_rep_save_file}]') + + save_multiple_samples(out_path, {'all': all_file_template}, animations, fps, max(list(all_lengths) + [n_frames])) + + abs_path = os.path.abspath(out_path) + print(f'[Done] Results are at [{abs_path}]') + + +if __name__ == "__main__": + main() diff --git a/motion_diffusion_model/sample/generate.py b/motion_diffusion_model/sample/generate.py new file mode 100644 index 0000000000000000000000000000000000000000..3507becf91ba6193d8e4653e65952dcbccb6952f --- /dev/null +++ b/motion_diffusion_model/sample/generate.py @@ -0,0 +1,318 @@ +# This code is based on https://github.com/openai/guided-diffusion +""" +Generate a large batch of image samples from a model and save them as a large +numpy array. This can be used to produce samples for FID evaluation. 
+""" +from utils.fixseed import fixseed +import os +import numpy as np +import torch +from utils.parser_util import generate_args +from utils.model_util import create_model_and_diffusion, load_saved_model +from utils import dist_util +from utils.sampler_util import ClassifierFreeSampleModel, AutoRegressiveSampler +from data_loaders.get_data import get_dataset_loader +from data_loaders.humanml.scripts.motion_process import recover_from_ric, get_target_location, sample_goal +import data_loaders.humanml.utils.paramUtil as paramUtil +from data_loaders.humanml.utils.plot_script import plot_3d_motion +import shutil +from data_loaders.tensors import collate +from moviepy.editor import clips_array + + +def main(args=None): + if args is None: + # args is None unless this method is called from another function (e.g. during training) + args = generate_args() + fixseed(args.seed) + out_path = args.output_dir + n_joints = 22 if args.dataset == 'humanml' else 21 + name = os.path.basename(os.path.dirname(args.model_path)) + niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') + max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 + fps = 12.5 if args.dataset == 'kit' else 20 + n_frames = min(max_frames, int(args.motion_length*fps)) + is_using_data = not any([args.input_text, args.text_prompt, args.action_file, args.action_name]) + if args.context_len > 0: + is_using_data = True # For prefix completion, we need to sample a prefix + dist_util.setup_dist(args.device) + if out_path == '': + out_path = os.path.join(os.path.dirname(args.model_path), + 'samples_{}_{}_seed{}'.format(name, niter, args.seed)) + if args.text_prompt != '': + out_path += '_' + args.text_prompt.replace(' ', '_').replace('.', '') + elif args.input_text != '': + out_path += '_' + os.path.basename(args.input_text).replace('.txt', '').replace(' ', '_').replace('.', '') + elif args.dynamic_text_path != '': + out_path += '_' + os.path.basename(args.dynamic_text_path).replace('.txt', '').replace(' ', '_').replace('.', '') + + # this block must be called BEFORE the dataset is loaded + texts = None + if args.text_prompt != '': + texts = [args.text_prompt] * args.num_samples + elif args.input_text != '': + assert os.path.exists(args.input_text) + with open(args.input_text, 'r') as fr: + texts = fr.readlines() + texts = [s.replace('\n', '') for s in texts] + args.num_samples = len(texts) + elif args.dynamic_text_path != '': + assert os.path.exists(args.dynamic_text_path) + assert args.autoregressive, "Dynamic text sampling is only supported with autoregressive sampling." 
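+        # Expected file layout, sketched with made-up prompts and numbers: one prompt per
+        # line, each line driving a single autoregressive prediction segment of pred_len
+        # frames, e.g.
+        #     a person walks forward
+        #     the person turns left
+        #     the person sits down
+        # With pred_len == 40 (roughly 2 seconds at 20 fps, an assumed value rather than
+        # one set here), a 3-line file gives n_frames = 3 * 40 = 120.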
+ with open(args.dynamic_text_path, 'r') as fr: + texts = fr.readlines() + texts = [s.replace('\n', '') for s in texts] + n_frames = len(texts) * args.pred_len # each text prompt is for a single prediction + elif args.action_name: + action_text = [args.action_name] + args.num_samples = 1 + elif args.action_file != '': + assert os.path.exists(args.action_file) + with open(args.action_file, 'r') as fr: + action_text = fr.readlines() + action_text = [s.replace('\n', '') for s in action_text] + args.num_samples = len(action_text) + + args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples + + print('Loading dataset...') + data = load_dataset(args, max_frames, n_frames) + total_num_samples = args.num_samples * args.num_repetitions + + print("Creating model and diffusion...") + model, diffusion = create_model_and_diffusion(args, data) + + sample_fn = diffusion.p_sample_loop + if args.autoregressive: + sample_cls = AutoRegressiveSampler(args, sample_fn, n_frames) + sample_fn = sample_cls.sample + + print(f"Loading checkpoints from [{args.model_path}]...") + load_saved_model(model, args.model_path, use_avg=args.use_ema) + + if args.guidance_param != 1: + model = ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler + model.to(dist_util.dev()) + model.eval() # disable random masking + + motion_shape = (args.batch_size, model.njoints, model.nfeats, n_frames) + + if is_using_data: + iterator = iter(data) + input_motion, model_kwargs = next(iterator) + input_motion = input_motion.to(dist_util.dev()) + if texts is not None: + model_kwargs['y']['text'] = texts + else: + collate_args = [{'inp': torch.zeros(n_frames), 'tokens': None, 'lengths': n_frames}] * args.num_samples + is_t2m = any([args.input_text, args.text_prompt]) + if is_t2m: + # t2m + collate_args = [dict(arg, text=txt) for arg, txt in zip(collate_args, texts)] + else: + # a2m + action = data.dataset.action_name_to_action(action_text) + collate_args = [dict(arg, action=one_action, action_text=one_action_text) for + arg, one_action, one_action_text in zip(collate_args, action, action_text)] + _, model_kwargs = collate(collate_args) + + model_kwargs['y'] = {key: val.to(dist_util.dev()) if torch.is_tensor(val) else val for key, val in model_kwargs['y'].items()} + init_image = None + + all_motions = [] + all_lengths = [] + all_text = [] + + # add CFG scale to batch + if args.guidance_param != 1: + model_kwargs['y']['scale'] = torch.ones(args.batch_size, device=dist_util.dev()) * args.guidance_param + + if 'text' in model_kwargs['y'].keys(): + # encoding once instead of each iteration saves lots of time + model_kwargs['y']['text_embed'] = model.encode_text(model_kwargs['y']['text']) + + if args.dynamic_text_path != '': + # Rearange the text to match the autoregressive sampling - each prompt fits to a single prediction + # Which is 2 seconds of motion by default + model_kwargs['y']['text'] = [model_kwargs['y']['text']] * args.num_samples + if args.text_encoder_type == 'bert': + model_kwargs['y']['text_embed'] = (model_kwargs['y']['text_embed'][0].unsqueeze(0).repeat(args.num_samples, 1, 1, 1), + model_kwargs['y']['text_embed'][1].unsqueeze(0).repeat(args.num_samples, 1, 1)) + else: + raise NotImplementedError('DiP model only supports BERT text encoder at the moment. 
If you implement this, please send a PR!') + + for rep_i in range(args.num_repetitions): + print(f'### Sampling [repetitions #{rep_i}]') + + sample = sample_fn( + model, + motion_shape, + clip_denoised=False, + model_kwargs=model_kwargs, + skip_timesteps=0, # 0 is the default value - i.e. don't skip any step + init_image=init_image, + progress=True, + dump_steps=None, + noise=None, + const_noise=False, + ) + + # Recover XYZ *positions* from HumanML3D vector representation + if model.data_rep == 'hml_vec': + n_joints = 22 if sample.shape[1] == 263 else 21 + sample = data.dataset.t2m_dataset.inv_transform(sample.cpu().permute(0, 2, 3, 1)).float() + sample = recover_from_ric(sample, n_joints) + sample = sample.view(-1, *sample.shape[2:]).permute(0, 2, 3, 1) + + rot2xyz_pose_rep = 'xyz' if model.data_rep in ['xyz', 'hml_vec'] else model.data_rep + rot2xyz_mask = None if rot2xyz_pose_rep == 'xyz' else model_kwargs['y']['mask'].reshape(args.batch_size, n_frames).bool() + sample = model.rot2xyz(x=sample, mask=rot2xyz_mask, pose_rep=rot2xyz_pose_rep, glob=True, translation=True, + jointstype='smpl', vertstrans=True, betas=None, beta=0, glob_rot=None, + get_rotations_back=False) + + if args.unconstrained: + all_text += ['unconstrained'] * args.num_samples + else: + text_key = 'text' if 'text' in model_kwargs['y'] else 'action_text' + all_text += model_kwargs['y'][text_key] + + all_motions.append(sample.cpu().numpy()) + _len = model_kwargs['y']['lengths'].cpu().numpy() + if 'prefix' in model_kwargs['y'].keys(): + _len[:] = sample.shape[-1] + all_lengths.append(_len) + + print(f"created {len(all_motions) * args.batch_size} samples") + + + all_motions = np.concatenate(all_motions, axis=0) + all_motions = all_motions[:total_num_samples] # [bs, njoints, 6, seqlen] + all_text = all_text[:total_num_samples] + all_lengths = np.concatenate(all_lengths, axis=0)[:total_num_samples] + + if os.path.exists(out_path): + shutil.rmtree(out_path) + os.makedirs(out_path) + + npy_path = os.path.join(out_path, 'results.npy') + print(f"saving results file to [{npy_path}]") + np.save(npy_path, + {'motion': all_motions, 'text': all_text, 'lengths': all_lengths, + 'num_samples': args.num_samples, 'num_repetitions': args.num_repetitions}) + if args.dynamic_text_path != '': + text_file_content = '\n'.join(['#'.join(s) for s in all_text]) + else: + text_file_content = '\n'.join(all_text) + with open(npy_path.replace('.npy', '.txt'), 'w') as fw: + fw.write(text_file_content) + with open(npy_path.replace('.npy', '_len.txt'), 'w') as fw: + fw.write('\n'.join([str(l) for l in all_lengths])) + + print(f"saving visualizations to [{out_path}]...") + skeleton = paramUtil.kit_kinematic_chain if args.dataset == 'kit' else paramUtil.t2m_kinematic_chain + + sample_print_template, row_print_template, all_print_template, \ + sample_file_template, row_file_template, all_file_template = construct_template_variables(args.unconstrained) + max_vis_samples = 6 + num_vis_samples = min(args.num_samples, max_vis_samples) + animations = np.empty(shape=(args.num_samples, args.num_repetitions), dtype=object) + max_length = max(all_lengths) + + for sample_i in range(args.num_samples): + rep_files = [] + for rep_i in range(args.num_repetitions): + caption = all_text[rep_i*args.batch_size + sample_i] + if args.dynamic_text_path != '': # caption per frame + assert type(caption) == list + caption_per_frame = [] + for c in caption: + caption_per_frame += [c] * args.pred_len + caption = caption_per_frame + + + # Trim / freeze motion if needed + length = 
all_lengths[rep_i*args.batch_size + sample_i] + motion = all_motions[rep_i*args.batch_size + sample_i].transpose(2, 0, 1)[:max_length] + if motion.shape[0] > length: + motion[length:-1] = motion[length-1] # duplicate the last frame to end of motion, so all motions will be in equal length + + save_file = sample_file_template.format(sample_i, rep_i) + animation_save_path = os.path.join(out_path, save_file) + gt_frames = np.arange(args.context_len) if args.context_len > 0 and not args.autoregressive else [] + animations[sample_i, rep_i] = plot_3d_motion(animation_save_path, + skeleton, motion, dataset=args.dataset, title=caption, + fps=fps, gt_frames=gt_frames) + rep_files.append(animation_save_path) + + save_multiple_samples(out_path, {'all': all_file_template}, animations, fps, max(list(all_lengths) + [n_frames])) + + abs_path = os.path.abspath(out_path) + print(f'[Done] Results are at [{abs_path}]') + + return out_path + + +def save_multiple_samples(out_path, file_templates, animations, fps, max_frames, no_dir=False): + + num_samples_in_out_file = 3 + n_samples = animations.shape[0] + + for sample_i in range(0,n_samples,num_samples_in_out_file): + last_sample_i = min(sample_i+num_samples_in_out_file, n_samples) + all_sample_save_file = file_templates['all'].format(sample_i, last_sample_i-1) + if no_dir and n_samples <= num_samples_in_out_file: + all_sample_save_path = out_path + else: + all_sample_save_path = os.path.join(out_path, all_sample_save_file) + print(f'saving {os.path.split(out_path)[1]}/{all_sample_save_file}') + + clips = clips_array(animations[sample_i:last_sample_i]) + clips.duration = max_frames/fps + + # import time + # start = time.time() + clips.write_videofile(all_sample_save_path, fps=fps, threads=4, logger=None) + # print(f'duration = {time.time()-start}') + + for clip in clips.clips: + # close internal clips. 
Does nothing but better use in case one day it will do something + clip.close() + clips.close() # important + + +def construct_template_variables(unconstrained): + row_file_template = 'sample{:02d}.mp4' + all_file_template = 'samples_{:02d}_to_{:02d}.mp4' + if unconstrained: + sample_file_template = 'row{:02d}_col{:02d}.mp4' + sample_print_template = '[{} row #{:02d} column #{:02d} | -> {}]' + row_file_template = row_file_template.replace('sample', 'row') + row_print_template = '[{} row #{:02d} | all columns | -> {}]' + all_file_template = all_file_template.replace('samples', 'rows') + all_print_template = '[rows {:02d} to {:02d} | -> {}]' + else: + sample_file_template = 'sample{:02d}_rep{:02d}.mp4' + sample_print_template = '["{}" ({:02d}) | Rep #{:02d} | -> {}]' + row_print_template = '[ "{}" ({:02d}) | all repetitions | -> {}]' + all_print_template = '[samples {:02d} to {:02d} | all repetitions | -> {}]' + + return sample_print_template, row_print_template, all_print_template, \ + sample_file_template, row_file_template, all_file_template + + +def load_dataset(args, max_frames, n_frames): + data = get_dataset_loader(name=args.dataset, + batch_size=args.batch_size, + num_frames=max_frames, + split='test', + hml_mode='train' if args.pred_len > 0 else 'text_only', # We need to sample a prefix from the dataset + fixed_len=args.pred_len + args.context_len, pred_len=args.pred_len, device=dist_util.dev()) + data.fixed_length = n_frames + return data + + +def is_substr_in_list(substr, list_of_strs): + return np.char.find(list_of_strs, substr) != -1 # [substr in string for string in list_of_strs] + +if __name__ == "__main__": + main() diff --git a/motion_diffusion_model/sample/predict.py b/motion_diffusion_model/sample/predict.py new file mode 100644 index 0000000000000000000000000000000000000000..84455d9e67c74a63ea952825fa4266e6caba1253 --- /dev/null +++ b/motion_diffusion_model/sample/predict.py @@ -0,0 +1,167 @@ +import os +import subprocess +from typing import Any, List, Optional +from argparse import Namespace + +import torch +from cog import BasePredictor, Input, Path, BaseModel + +import data_loaders.humanml.utils.paramUtil as paramUtil +from data_loaders.get_data import get_dataset_loader +from data_loaders.humanml.scripts.motion_process import recover_from_ric +from data_loaders.humanml.utils.plot_script import plot_3d_motion +from data_loaders.tensors import collate +from utils.sampler_util import ClassifierFreeSampleModel +from utils import dist_util +from utils.model_util import create_model_and_diffusion, load_model_wo_clip +from visualize.motions2hik import motions2hik +from sample.generate import construct_template_variables + +""" +In case of matplot lib issues it may be needed to delete model/data_loaders/humanml/utils/plot_script.py" in lines 89~92 as +suggested in https://github.com/GuyTevet/motion-diffusion-model/issues/6 +""" + + +class ModelOutput(BaseModel): + json_file: Optional[Any] + animation: Optional[List[Path]] + + +def get_args(): + args = Namespace() + args.fps = 20 + args.model_path = './save/humanml_trans_enc_512/model000200000.pt' + args.guidance_param = 2.5 + args.unconstrained = False + args.dataset = 'humanml' + + args.cond_mask_prob = 1 + args.emb_trans_dec = False + args.latent_dim = 512 + args.layers = 8 + args.arch = 'trans_enc' + + args.noise_schedule = 'cosine' + args.sigma_small = True + args.lambda_vel = 0.0 + args.lambda_rcxyz = 0.0 + args.lambda_fc = 0.0 + return args + + +class Predictor(BasePredictor): + def setup(self): + 
subprocess.run(["mkdir", "/root/.cache/clip"]) + subprocess.run(["cp", "-r", "ViT-B-32.pt", "/root/.cache/clip"]) + + self.args = get_args() + self.num_frames = self.args.fps * 6 + print('Loading dataset...') + + # temporary data + self.data = get_dataset_loader(name=self.args.dataset, + batch_size=1, + num_frames=196, + split='test', + hml_mode='text_only') + + self.data.fixed_length = float(self.num_frames) + + print("Creating model and diffusion...") + self.model, self.diffusion = create_model_and_diffusion(self.args, self.data) + + print(f"Loading checkpoints from...") + state_dict = torch.load(self.args.model_path, map_location='cpu') + load_model_wo_clip(self.model, state_dict) + + if self.args.guidance_param != 1: + self.model = ClassifierFreeSampleModel(self.model) # wrapping model with the classifier-free sampler + self.model.to(dist_util.dev()) + self.model.eval() # disable random masking + + def predict( + self, + prompt: str = Input(default="the person walked forward and is picking up his toolbox."), + num_repetitions: int = Input(default=3, description="How many"), + output_format: str = Input( + description='Choose the format of the output, either an animation or a json file of the animation data.\ + The json format is: {"thetas": [...], "root_translation": [...], "joint_map": [...]}, where "thetas" \ + is an [nframes x njoints x 3] array of joint rotations in degrees, "root_translation" is an [nframes x 3] \ + array of (X, Y, Z) positions of the root, and "joint_map" is a list mapping the SMPL joint index to the\ + corresponding HumanIK joint name', + default="animation", + choices=["animation", "json_file"], + ), + ) -> ModelOutput: + args = self.args + args.num_repetitions = int(num_repetitions) + + self.data = get_dataset_loader(name=self.args.dataset, + batch_size=args.num_repetitions, + num_frames=self.num_frames, + split='test', + hml_mode='text_only') + + collate_args = [{'inp': torch.zeros(self.num_frames), 'tokens': None, 'lengths': self.num_frames, 'text': str(prompt)}] + _, model_kwargs = collate(collate_args) + + # add CFG scale to batch + if args.guidance_param != 1: + model_kwargs['y']['scale'] = torch.ones(args.num_repetitions, device=dist_util.dev()) * args.guidance_param + + sample_fn = self.diffusion.p_sample_loop + sample = sample_fn( + self.model, + (args.num_repetitions, self.model.njoints, self.model.nfeats, self.num_frames), + clip_denoised=False, + model_kwargs=model_kwargs, + skip_timesteps=0, # 0 is the default value - i.e. 
don't skip any step + init_image=None, + progress=True, + dump_steps=None, + noise=None, + const_noise=False, + ) + + # Recover XYZ *positions* from HumanML3D vector representation + if self.model.data_rep == 'hml_vec': + n_joints = 22 if sample.shape[1] == 263 else 21 + sample = self.data.dataset.t2m_dataset.inv_transform(sample.cpu().permute(0, 2, 3, 1)).float() + sample = recover_from_ric(sample, n_joints) + sample = sample.view(-1, *sample.shape[2:]).permute(0, 2, 3, 1) + + rot2xyz_pose_rep = 'xyz' if self.model.data_rep in ['xyz', 'hml_vec'] else self.model.data_rep + rot2xyz_mask = None if rot2xyz_pose_rep == 'xyz' else model_kwargs['y']['mask'].reshape(args.num_repetitions, + self.num_frames).bool() + sample = self.model.rot2xyz(x=sample, mask=rot2xyz_mask, pose_rep=rot2xyz_pose_rep, glob=True, translation=True, + jointstype='smpl', vertstrans=True, betas=None, beta=0, glob_rot=None, + get_rotations_back=False) + + all_motions = sample.cpu().numpy() + + if output_format == 'json_file': + data_dict = motions2hik(all_motions) + return ModelOutput(json_file=data_dict) + + caption = str(prompt) + + skeleton = paramUtil.t2m_kinematic_chain + + sample_print_template, row_print_template, all_print_template, \ + sample_file_template, row_file_template, all_file_template = construct_template_variables( + args.unconstrained) + + rep_files = [] + replicate_fnames = [] + for rep_i in range(args.num_repetitions): + motion = all_motions[rep_i].transpose(2, 0, 1)[:self.num_frames] + save_file = sample_file_template.format(1, rep_i) + print(sample_print_template.format(caption, 1, rep_i, save_file)) + plot_3d_motion(save_file, skeleton, motion, dataset=args.dataset, title=caption, fps=args.fps) + # Credit for visualization: https://github.com/EricGuo5513/text-to-motion + rep_files.append(save_file) + + replicate_fnames.append(Path(save_file)) + + return ModelOutput(animation=replicate_fnames) diff --git a/motion_diffusion_model/utils/PYTORCH3D_LICENSE b/motion_diffusion_model/utils/PYTORCH3D_LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..bed0cebe976e160c9087d1b1054473bdacf75b3b --- /dev/null +++ b/motion_diffusion_model/utils/PYTORCH3D_LICENSE @@ -0,0 +1,30 @@ +BSD License + +For PyTorch3D software + +Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/motion_diffusion_model/utils/config.py b/motion_diffusion_model/utils/config.py new file mode 100644 index 0000000000000000000000000000000000000000..091d790e963959c326917688ee267e6a4ec136d1 --- /dev/null +++ b/motion_diffusion_model/utils/config.py @@ -0,0 +1,17 @@ +import os + +SMPL_DATA_PATH = "./body_models/smpl" + +SMPL_KINTREE_PATH = os.path.join(SMPL_DATA_PATH, "kintree_table.pkl") +SMPL_MODEL_PATH = os.path.join(SMPL_DATA_PATH, "SMPL_NEUTRAL.pkl") +JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(SMPL_DATA_PATH, 'J_regressor_extra.npy') + +ROT_CONVENTION_TO_ROT_NUMBER = { + 'legacy': 23, + 'no_hands': 21, + 'full_hands': 51, + 'mitten_hands': 33, +} + +GENDERS = ['neutral', 'male', 'female'] +NUM_BETAS = 10 \ No newline at end of file diff --git a/motion_diffusion_model/utils/dist_util.py b/motion_diffusion_model/utils/dist_util.py new file mode 100644 index 0000000000000000000000000000000000000000..9f5580a7890010ed4acdfcee8cb4eb7f8618769c --- /dev/null +++ b/motion_diffusion_model/utils/dist_util.py @@ -0,0 +1,77 @@ +""" +Helpers for distributed training. +""" + +import socket + +import torch as th +import torch.distributed as dist + +# Change this to reflect your cluster layout. +# The GPU for a given rank is (rank % GPUS_PER_NODE). +GPUS_PER_NODE = 8 + +SETUP_RETRY_COUNT = 3 + +used_device = 0 + +def setup_dist(device=0): + """ + Setup a distributed process group. + """ + global used_device + used_device = device + if dist.is_initialized(): + return + # os.environ["CUDA_VISIBLE_DEVICES"] = str(device) # f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}" + + # comm = MPI.COMM_WORLD + # backend = "gloo" if not th.cuda.is_available() else "nccl" + + # if backend == "gloo": + # hostname = "localhost" + # else: + # hostname = socket.gethostbyname(socket.getfqdn()) + # os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0) + # os.environ["RANK"] = str(comm.rank) + # os.environ["WORLD_SIZE"] = str(comm.size) + + # port = comm.bcast(_find_free_port(), root=used_device) + # os.environ["MASTER_PORT"] = str(port) + # dist.init_process_group(backend=backend, init_method="env://") + + +def dev(): + """ + Get the device to use for torch.distributed. + """ + global used_device + if th.cuda.is_available() and used_device>=0: + return th.device(f"cuda:{used_device}") + return th.device("cpu") + + +def load_state_dict(path, **kwargs): + """ + Load a PyTorch file without redundant fetches across MPI ranks. + """ + return th.load(path, **kwargs) + + +def sync_params(params): + """ + Synchronize a sequence of Tensors across ranks from rank 0. 
+ """ + for p in params: + with th.no_grad(): + dist.broadcast(p, 0) + + +def _find_free_port(): + try: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.bind(("", 0)) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return s.getsockname()[1] + finally: + s.close() diff --git a/motion_diffusion_model/utils/fixseed.py b/motion_diffusion_model/utils/fixseed.py new file mode 100644 index 0000000000000000000000000000000000000000..6f44f6ca263dcc410102a50970ce1b78405ba1f1 --- /dev/null +++ b/motion_diffusion_model/utils/fixseed.py @@ -0,0 +1,18 @@ +import numpy as np +import torch +import random + + +def fixseed(seed): + torch.backends.cudnn.benchmark = False + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + + +# SEED = 10 +# EVALSEED = 0 +# # Provoc warning: not fully functionnal yet +# # torch.set_deterministic(True) +# torch.backends.cudnn.benchmark = False +# fixseed(SEED) diff --git a/motion_diffusion_model/utils/loss_util.py b/motion_diffusion_model/utils/loss_util.py new file mode 100644 index 0000000000000000000000000000000000000000..2720716725e41e609ac90ca7d37cfde6bb495603 --- /dev/null +++ b/motion_diffusion_model/utils/loss_util.py @@ -0,0 +1,46 @@ +from diffusion.nn import mean_flat, sum_flat +import torch +import numpy as np + +def angle_l2(angle1, angle2): + a = angle1 - angle2 + a = (a + (torch.pi/2)) % torch.pi - (torch.pi/2) + return a ** 2 + +def diff_l2(a, b): + return (a - b) ** 2 + +def masked_l2(a, b, mask, loss_fn=diff_l2, epsilon=1e-8, entries_norm=True): + # assuming a.shape == b.shape == bs, J, Jdim, seqlen + # assuming mask.shape == bs, 1, 1, seqlen + loss = loss_fn(a, b) + loss = sum_flat(loss * mask.float()) # gives \sigma_euclidean over unmasked elements + n_entries = a.shape[1] + if len(a.shape) > 3: + n_entries *= a.shape[2] + non_zero_elements = sum_flat(mask) + if entries_norm: + # In cases the mask is per frame, and not specifying the number of entries per frame, this normalization is needed, + # Otherwise set it to False + non_zero_elements *= n_entries + # print('mask', mask.shape) + # print('non_zero_elements', non_zero_elements) + # print('loss', loss) + mse_loss_val = loss / (non_zero_elements + epsilon) # Add epsilon to avoid division by zero + # print('mse_loss_val', mse_loss_val) + return mse_loss_val + + +def masked_goal_l2(pred_goal, ref_goal, cond, all_goal_joint_names): + all_goal_joint_names_w_traj = np.append(all_goal_joint_names, 'traj') + target_joint_idx = [[np.where(all_goal_joint_names_w_traj == j)[0][0] for j in sample_joints] for sample_joints in cond['target_joint_names']] + loc_mask = torch.zeros_like(pred_goal[:,:-1], dtype=torch.bool) + for sample_idx in range(loc_mask.shape[0]): + loc_mask[sample_idx, target_joint_idx[sample_idx]] = True + loc_mask[:, -1, 1] = False # vertical joint of 'traj' is always masked out + loc_loss = masked_l2(pred_goal[:,:-1], ref_goal[:,:-1], loc_mask, entries_norm=False) + + heading_loss = masked_l2(pred_goal[:,-1:, :1], ref_goal[:,-1:, :1], cond['is_heading'].unsqueeze(1).unsqueeze(1), loss_fn=angle_l2, entries_norm=False) + + loss = loc_loss + heading_loss + return loss diff --git a/motion_diffusion_model/utils/misc.py b/motion_diffusion_model/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..afcb13fe892da0aab0bd2ab61803e81aa4d10754 --- /dev/null +++ b/motion_diffusion_model/utils/misc.py @@ -0,0 +1,74 @@ +import torch +import torch.nn as nn + + +class WeightedSum(nn.Module): + def __init__(self, num_rows): + super(WeightedSum, 
self).__init__() + # Initialize learnable weights + self.weights = nn.Parameter(torch.randn(num_rows)) + + def forward(self, x): + # Ensure weights are normalized (optional) + normalized_weights = self.weights / self.weights.sum() # torch.softmax(self.weights, dim=0) + # Compute the weighted sum of the rows + weighted_sum = torch.matmul(normalized_weights, x) + return weighted_sum + + +def wrapped_getattr(self, name, default=None, wrapped_member_name='model'): + ''' should be called from wrappers of model classes such as ClassifierFreeSampleModel''' + + if isinstance(self, torch.nn.Module): + # for descendants of nn.Module, name may be in self.__dict__[_parameters/_buffers/_modules] + # so we activate nn.Module.__getattr__ first. + # Otherwise, we might encounter an infinite loop + try: + attr = torch.nn.Module.__getattr__(self, name) + except AttributeError: + wrapped_member = torch.nn.Module.__getattr__(self, wrapped_member_name) + attr = getattr(wrapped_member, name, default) + else: + # the easy case, where self is not derived from nn.Module + wrapped_member = getattr(self, wrapped_member_name) + attr = getattr(wrapped_member, name, default) + return attr + + +def to_numpy(tensor): + if torch.is_tensor(tensor): + return tensor.cpu().numpy() + elif type(tensor).__module__ != 'numpy': + raise ValueError("Cannot convert {} to numpy array".format( + type(tensor))) + return tensor + + +def to_torch(ndarray): + if type(ndarray).__module__ == 'numpy': + return torch.from_numpy(ndarray) + elif not torch.is_tensor(ndarray): + raise ValueError("Cannot convert {} to torch tensor".format( + type(ndarray))) + return ndarray + + +def cleanexit(): + import sys + import os + try: + sys.exit(0) + except SystemExit: + os._exit(0) + +def load_model_wo_clip(model, state_dict): + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) + assert len(unexpected_keys) == 0 + assert all([k.startswith('clip_model.') for k in missing_keys]) + +def freeze_joints(x, joints_to_freeze): + # Freezes selected joint *rotations* as they appear in the first frame + # x [bs, [root+n_joints], joint_dim(6), seqlen] + frozen = x.detach().clone() + frozen[:, joints_to_freeze, :, :] = frozen[:, joints_to_freeze, :, :1] + return frozen diff --git a/motion_diffusion_model/utils/model_util.py b/motion_diffusion_model/utils/model_util.py new file mode 100644 index 0000000000000000000000000000000000000000..ca8b4318842096b00a180a4e006007a66170d0a8 --- /dev/null +++ b/motion_diffusion_model/utils/model_util.py @@ -0,0 +1,132 @@ +import torch +from model.mdm import MDM +from diffusion import gaussian_diffusion as gd +from diffusion.respace import SpacedDiffusion, space_timesteps +from utils.parser_util import get_cond_mode +from data_loaders.humanml_utils import HML_EE_JOINT_NAMES + +def load_model_wo_clip(model, state_dict): + # assert (state_dict['sequence_pos_encoder.pe'][:model.sequence_pos_encoder.pe.shape[0]] == model.sequence_pos_encoder.pe).all() # TEST + # assert (state_dict['embed_timestep.sequence_pos_encoder.pe'][:model.embed_timestep.sequence_pos_encoder.pe.shape[0]] == model.embed_timestep.sequence_pos_encoder.pe).all() # TEST + del state_dict['sequence_pos_encoder.pe'] # no need to load it (fixed), and causes size mismatch for older models + del state_dict['embed_timestep.sequence_pos_encoder.pe'] # no need to load it (fixed), and causes size mismatch for older models + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) + assert len(unexpected_keys) == 0 + assert 
all([k.startswith('clip_model.') or 'sequence_pos_encoder' in k for k in missing_keys]) + + +def create_model_and_diffusion(args, data): + model = MDM(**get_model_args(args, data)) + diffusion = create_gaussian_diffusion(args) + return model, diffusion + + +def get_model_args(args, data): + + # default args + clip_version = 'ViT-B/32' + action_emb = 'tensor' + cond_mode = get_cond_mode(args) + if hasattr(data.dataset, 'num_actions'): + num_actions = data.dataset.num_actions + else: + num_actions = 1 + + # SMPL defaults + data_rep = 'rot6d' + njoints = 25 + nfeats = 6 + all_goal_joint_names = [] + + if args.dataset == 'humanml': + data_rep = 'hml_vec' + njoints = 263 + nfeats = 1 + all_goal_joint_names = ['pelvis'] + HML_EE_JOINT_NAMES + elif args.dataset == 'kit': + data_rep = 'hml_vec' + njoints = 251 + nfeats = 1 + + # Compatibility with old models + if not hasattr(args, 'pred_len'): + args.pred_len = 0 + args.context_len = 0 + + emb_policy = args.__dict__.get('emb_policy', 'add') + multi_target_cond = args.__dict__.get('multi_target_cond', False) + multi_encoder_type = args.__dict__.get('multi_encoder_type', 'multi') + target_enc_layers = args.__dict__.get('target_enc_layers', 1) + + return {'modeltype': '', 'njoints': njoints, 'nfeats': nfeats, 'num_actions': num_actions, + 'translation': True, 'pose_rep': 'rot6d', 'glob': True, 'glob_rot': True, + 'latent_dim': args.latent_dim, 'ff_size': 1024, 'num_layers': args.layers, 'num_heads': 4, + 'dropout': 0.1, 'activation': "gelu", 'data_rep': data_rep, 'cond_mode': cond_mode, + 'cond_mask_prob': args.cond_mask_prob, 'action_emb': action_emb, 'arch': args.arch, + 'emb_trans_dec': args.emb_trans_dec, 'clip_version': clip_version, 'dataset': args.dataset, + 'text_encoder_type': args.text_encoder_type, + 'pos_embed_max_len': args.pos_embed_max_len, 'mask_frames': args.mask_frames, + 'pred_len': args.pred_len, 'context_len': args.context_len, 'emb_policy': emb_policy, + 'all_goal_joint_names': all_goal_joint_names, 'multi_target_cond': multi_target_cond, 'multi_encoder_type': multi_encoder_type, 'target_enc_layers': target_enc_layers, + } + + + +def create_gaussian_diffusion(args): + # default params + predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal! + steps = args.diffusion_steps + scale_beta = 1. # no scaling + timestep_respacing = '' # can be used for ddim sampling, we don't use it. + learn_sigma = False + rescale_timesteps = False + + betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta) + loss_type = gd.LossType.MSE + + if not timestep_respacing: + timestep_respacing = [steps] + + if hasattr(args, 'lambda_target_loc'): + lambda_target_loc = args.lambda_target_loc + else: + lambda_target_loc = 0. 
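+ # Backward compatibility: older checkpoints may not define lambda_target_loc, so the target-location loss weight defaults to 0 before building the diffusion below.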
+ + return SpacedDiffusion( + use_timesteps=space_timesteps(steps, timestep_respacing), + betas=betas, + model_mean_type=( + gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X + ), + model_var_type=( + ( + gd.ModelVarType.FIXED_LARGE + if not args.sigma_small + else gd.ModelVarType.FIXED_SMALL + ) + if not learn_sigma + else gd.ModelVarType.LEARNED_RANGE + ), + loss_type=loss_type, + rescale_timesteps=rescale_timesteps, + lambda_vel=args.lambda_vel, + lambda_rcxyz=args.lambda_rcxyz, + lambda_fc=args.lambda_fc, + lambda_target_loc=lambda_target_loc, + ) + +def load_saved_model(model, model_path, use_avg: bool=False): # use_avg_model + state_dict = torch.load(model_path, map_location='cpu') + # Use average model when possible + if use_avg and 'model_avg' in state_dict.keys(): + # if use_avg_model: + print('loading avg model') + state_dict = state_dict['model_avg'] + else: + if 'model' in state_dict: + print('loading model without avg') + state_dict = state_dict['model'] + else: + print('checkpoint has no avg model, loading as usual.') + load_model_wo_clip(model, state_dict) + return model \ No newline at end of file diff --git a/motion_diffusion_model/utils/parser_util.py b/motion_diffusion_model/utils/parser_util.py new file mode 100644 index 0000000000000000000000000000000000000000..ac02a9923a0abdaf015ea26b1f55ff90e6bb7cf8 --- /dev/null +++ b/motion_diffusion_model/utils/parser_util.py @@ -0,0 +1,320 @@ +from argparse import ArgumentParser +import argparse +import os +import json + + +def parse_and_load_from_model(parser): + # args according to the loaded model + # do not try to specify them from cmd line since they will be overwritten + add_data_options(parser) + add_model_options(parser) + add_diffusion_options(parser) + args = parser.parse_args() + args_to_overwrite = [] + for group_name in ['dataset', 'model', 'diffusion']: + args_to_overwrite += get_args_per_group_name(parser, args, group_name) + + # load args from model + if args.model_path != '': # if not using external results file + args = load_args_from_model(args, args_to_overwrite) + + if args.cond_mask_prob == 0: + args.guidance_param = 1 + + return apply_rules(args) + +def load_args_from_model(args, args_to_overwrite): + model_path = get_model_path_from_args() + args_path = os.path.join(os.path.dirname(model_path), 'args.json') + assert os.path.exists(args_path), 'Arguments json file was not found!' 
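+ # Load the args.json that was saved next to the checkpoint at training time; the loop below overwrites the matching runtime args with these stored values.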
+ with open(args_path, 'r') as fr: + model_args = json.load(fr) + + for a in args_to_overwrite: + if a in model_args.keys(): + setattr(args, a, model_args[a]) + + elif 'cond_mode' in model_args: # backward compatibility + unconstrained = (model_args['cond_mode'] == 'no_cond') + setattr(args, 'unconstrained', unconstrained) + + else: + print('Warning: was not able to load [{}], using default value [{}] instead.'.format(a, args.__dict__[a])) + return args + +def apply_rules(args): + # For prefix completion + if args.pred_len == 0: + args.pred_len = args.context_len + + # For target conditioning + if args.lambda_target_loc > 0.: + args.multi_target_cond = True + return args + + +def get_args_per_group_name(parser, args, group_name): + for group in parser._action_groups: + if group.title == group_name: + group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions} + return list(argparse.Namespace(**group_dict).__dict__.keys()) + raise ValueError('group_name was not found.') + +def get_model_path_from_args(): + try: + dummy_parser = ArgumentParser() + dummy_parser.add_argument('--model_path') + dummy_args, _ = dummy_parser.parse_known_args() + return dummy_args.model_path + except: + raise ValueError('model_path argument must be specified.') + + +def add_base_options(parser): + group = parser.add_argument_group('base') + group.add_argument("--cuda", default=True, type=bool, help="Use cuda device, otherwise use CPU.") + group.add_argument("--device", default=0, type=int, help="Device id to use.") + group.add_argument("--seed", default=10, type=int, help="For fixing random seed.") + group.add_argument("--batch_size", default=64, type=int, help="Batch size during training.") + group.add_argument("--train_platform_type", default='NoPlatform', choices=['NoPlatform', 'ClearmlPlatform', 'TensorboardPlatform', 'WandBPlatform'], type=str, + help="Choose platform to log results. NoPlatform means no logging.") + group.add_argument("--external_mode", default=False, type=bool, help="For backward compatibility, do not change or delete.") + + +def add_diffusion_options(parser): + group = parser.add_argument_group('diffusion') + group.add_argument("--noise_schedule", default='cosine', choices=['linear', 'cosine'], type=str, + help="Noise schedule type") + group.add_argument("--diffusion_steps", default=1000, type=int, + help="Number of diffusion steps (denoted T in the paper)") + group.add_argument("--sigma_small", default=True, type=bool, help="Use smaller sigma values.") + + +def add_model_options(parser): + group = parser.add_argument_group('model') + group.add_argument("--arch", default='trans_enc', + choices=['trans_enc', 'trans_dec', 'gru'], type=str, + help="Architecture types as reported in the paper.") + group.add_argument("--text_encoder_type", default='clip', + choices=['clip', 'bert'], type=str, help="Text encoder type.") + group.add_argument("--emb_trans_dec", action='store_true', + help="For trans_dec architecture only, if true, will inject condition as a class token" + " (in addition to cross-attention).") + group.add_argument("--layers", default=8, type=int, + help="Number of layers.") + group.add_argument("--latent_dim", default=512, type=int, + help="Transformer/GRU width.") + group.add_argument("--cond_mask_prob", default=.1, type=float, + help="The probability of masking the condition during training." 
+ " For classifier-free guidance learning.") + group.add_argument("--mask_frames", action='store_true', help="If true, will fix Rotem's bug and mask invalid frames.") + group.add_argument("--lambda_rcxyz", default=0.0, type=float, help="Joint positions loss.") + group.add_argument("--lambda_vel", default=0.0, type=float, help="Joint velocity loss.") + group.add_argument("--lambda_fc", default=0.0, type=float, help="Foot contact loss.") + group.add_argument("--lambda_target_loc", default=0.0, type=float, help="For HumanML only, when . L2 with target location.") + group.add_argument("--unconstrained", action='store_true', + help="Model is trained unconditionally. That is, it is constrained by neither text nor action. " + "Currently tested on HumanAct12 only.") + group.add_argument("--pos_embed_max_len", default=5000, type=int, + help="Pose embedding max length.") + group.add_argument("--use_ema", action='store_true', + help="If True, will use EMA model averaging.") + + + group.add_argument("--multi_target_cond", action='store_true', help="If true, enable multi-target conditioning (aka Sigal's model).") + group.add_argument("--multi_encoder_type", default='single', choices=['single', 'multi', 'split'], type=str, help="Specifies the encoder type to be used for the multi joint condition.") + group.add_argument("--target_enc_layers", default=1, type=int, help="Num target encoder layers") + + + # Prefix completion model + group.add_argument("--context_len", default=0, type=int, help="If larger than 0, will do prefix completion.") + group.add_argument("--pred_len", default=0, type=int, help="If context_len larger than 0, will do prefix completion. If pred_len will not be specified - will use the same length as context_len") + + + + +def add_data_options(parser): + group = parser.add_argument_group('dataset') + group.add_argument("--dataset", default='humanml', choices=['humanml', 'kit', 'humanact12', 'uestc'], type=str, + help="Dataset name (choose from list).") + group.add_argument("--data_dir", default="", type=str, + help="If empty, will use defaults according to the specified dataset.") + + +def add_training_options(parser): + group = parser.add_argument_group('training') + group.add_argument("--save_dir", required=True, type=str, + help="Path to save checkpoints and results.") + group.add_argument("--overwrite", action='store_true', + help="If True, will enable to use an already existing save_dir.") + group.add_argument("--lr", default=1e-4, type=float, help="Learning rate.") + group.add_argument("--weight_decay", default=0.0, type=float, help="Optimizer weight decay.") + group.add_argument("--lr_anneal_steps", default=0, type=int, help="Number of learning rate anneal steps.") + group.add_argument("--eval_batch_size", default=32, type=int, + help="Batch size during evaluation loop. Do not change this unless you know what you are doing. 
" + "T2m precision calculation is based on fixed batch size 32.") + group.add_argument("--eval_split", default='test', choices=['val', 'test'], type=str, + help="Which split to evaluate on during training.") + group.add_argument("--eval_during_training", action='store_true', + help="If True, will run evaluation during training.") + group.add_argument("--eval_rep_times", default=3, type=int, + help="Number of repetitions for evaluation loop during training.") + group.add_argument("--eval_num_samples", default=1_000, type=int, + help="If -1, will use all samples in the specified split.") + group.add_argument("--log_interval", default=1_000, type=int, + help="Log losses each N steps") + group.add_argument("--save_interval", default=50_000, type=int, + help="Save checkpoints and run evaluation each N steps") + group.add_argument("--num_steps", default=600_000, type=int, + help="Training will stop after the specified number of steps.") + group.add_argument("--num_frames", default=60, type=int, + help="Limit for the maximal number of frames. In HumanML3D and KIT this field is ignored.") + group.add_argument("--resume_checkpoint", default="", type=str, + help="If not empty, will start from the specified checkpoint (path to model###.pt file).") + + group.add_argument("--gen_during_training", action='store_true', + help="If True, will generate motions during training, on each save interval.") + group.add_argument("--gen_num_samples", default=3, type=int, + help="Number of samples to sample while generating") + group.add_argument("--gen_num_repetitions", default=2, type=int, + help="Number of repetitions, per sample (text prompt/action)") + group.add_argument("--gen_guidance_param", default=2.5, type=float, + help="For classifier-free sampling - specifies the s parameter, as defined in the paper.") + + group.add_argument("--avg_model_beta", default=0.9999, type=float, help="Average model beta (for EMA).") + group.add_argument("--adam_beta2", default=0.999, type=float, help="Adam beta2.") + + group.add_argument("--target_joint_names", default='DIMP_FINAL', type=str, help="Force single joint configuration by specifing the joints (coma separated). If None - will use the random mode for all end effectors.") + group.add_argument("--autoregressive", action='store_true', help="If true, and we use a prefix model will generate motions in an autoregressive loop.") + group.add_argument("--autoregressive_include_prefix", action='store_true', help="If true, include the init prefix in the output, otherwise, will drop it.") + group.add_argument("--autoregressive_init", default='data', type=str, choices=['data', 'isaac'], + help="Sets the source of the init frames, either from the dataset or isaac init poses.") + + +def add_sampling_options(parser): + group = parser.add_argument_group('sampling') + group.add_argument("--model_path", required=True, type=str, + help="Path to model####.pt file to be sampled.") + group.add_argument("--output_dir", default='', type=str, + help="Path to results dir (auto created by the script). 
" + "If empty, will create dir in parallel to checkpoint.") + group.add_argument("--num_samples", default=6, type=int, + help="Maximal number of prompts to sample, " + "if loading dataset from file, this field will be ignored.") + group.add_argument("--num_repetitions", default=3, type=int, + help="Number of repetitions, per sample (text prompt/action)") + group.add_argument("--guidance_param", default=2.5, type=float, + help="For classifier-free sampling - specifies the s parameter, as defined in the paper.") + + group.add_argument("--autoregressive", action='store_true', help="If true, and we use a prefix model will generate motions in an autoregressive loop.") + group.add_argument("--autoregressive_include_prefix", action='store_true', help="If true, include the init prefix in the output, otherwise, will drop it.") + group.add_argument("--autoregressive_init", default='data', type=str, choices=['data', 'isaac'], + help="Sets the source of the init frames, either from the dataset or isaac init poses.") + +def add_generate_options(parser): + group = parser.add_argument_group('generate') + group.add_argument("--motion_length", default=6.0, type=float, + help="The length of the sampled motion [in seconds]. " + "Maximum is 9.8 for HumanML3D (text-to-motion), and 2.0 for HumanAct12 (action-to-motion)") + group.add_argument("--input_text", default='', type=str, + help="Path to a text file lists text prompts to be synthesized. If empty, will take text prompts from dataset.") + group.add_argument("--dynamic_text_path", default='', type=str, + help="For the autoregressive mode only! Path to a text file lists text prompts to be synthesized. If empty, will take text prompts from dataset.") + group.add_argument("--action_file", default='', type=str, + help="Path to a text file that lists names of actions to be synthesized. Names must be a subset of dataset/uestc/info/action_classes.txt if sampling from uestc, " + "or a subset of [warm_up,walk,run,jump,drink,lift_dumbbell,sit,eat,turn steering wheel,phone,boxing,throw] if sampling from humanact12. " + "If no file is specified, will take action names from dataset.") + group.add_argument("--text_prompt", default='', type=str, + help="A text prompt to be generated. If empty, will take text prompts from dataset.") + group.add_argument("--action_name", default='', type=str, + help="An action name to be generated. If empty, will take text prompts from dataset.") + group.add_argument("--target_joint_names", default='DIMP_FINAL', type=str, help="Force single joint configuration by specifing the joints (coma separated). If None - will use the random mode for all end effectors.") + + +def add_edit_options(parser): + group = parser.add_argument_group('edit') + group.add_argument("--edit_mode", default='in_between', choices=['in_between', 'upper_body'], type=str, + help="Defines which parts of the input motion will be edited.\n" + "(1) in_between - suffix and prefix motion taken from input motion, " + "middle motion is generated.\n" + "(2) upper_body - lower body joints taken from input motion, " + "upper body is generated.") + group.add_argument("--text_condition", default='', type=str, + help="Editing will be conditioned on this text prompt. 
" + "If empty, will perform unconditioned editing.") + group.add_argument("--prefix_end", default=0.25, type=float, + help="For in_between editing - Defines the end of input prefix (ratio from all frames).") + group.add_argument("--suffix_start", default=0.75, type=float, + help="For in_between editing - Defines the start of input suffix (ratio from all frames).") + + +def add_evaluation_options(parser): + group = parser.add_argument_group('eval') + group.add_argument("--model_path", required=True, type=str, + help="Path to model####.pt file to be sampled.") + group.add_argument("--eval_mode", default='wo_mm', choices=['wo_mm', 'mm_short', 'debug', 'full'], type=str, + help="wo_mm (t2m only) - 20 repetitions without multi-modality metric; " + "mm_short (t2m only) - 5 repetitions with multi-modality metric; " + "debug - short run, less accurate results." + "full (a2m only) - 20 repetitions.") + group.add_argument("--autoregressive", action='store_true', help="If true, and we use a prefix model will generate motions in an autoregressive loop.") + group.add_argument("--autoregressive_include_prefix", action='store_true', help="If true, include the init prefix in the output, otherwise, will drop it.") + group.add_argument("--autoregressive_init", default='data', type=str, choices=['data', 'isaac'], + help="Sets the source of the init frames, either from the dataset or isaac init poses.") + group.add_argument("--guidance_param", default=2.5, type=float, + help="For classifier-free sampling - specifies the s parameter, as defined in the paper.") + + +def get_cond_mode(args): + if args.unconstrained: + cond_mode = 'no_cond' + elif args.dataset in ['kit', 'humanml']: + cond_mode = 'text' + else: + cond_mode = 'action' + return cond_mode + + +def train_args(): + parser = ArgumentParser() + add_base_options(parser) + add_data_options(parser) + add_model_options(parser) + add_diffusion_options(parser) + add_training_options(parser) + return apply_rules(parser.parse_args()) + + +def generate_args(): + parser = ArgumentParser() + # args specified by the user: (all other will be loaded from the model) + add_base_options(parser) + add_sampling_options(parser) + add_generate_options(parser) + args = parse_and_load_from_model(parser) + cond_mode = get_cond_mode(args) + + if (args.input_text or args.text_prompt) and cond_mode != 'text': + raise Exception('Arguments input_text and text_prompt should not be used for an action condition. Please use action_file or action_name.') + elif (args.action_file or args.action_name) and cond_mode != 'action': + raise Exception('Arguments action_file and action_name should not be used for a text condition. 
Please use input_text or text_prompt.') + + return args + + +def edit_args(): + parser = ArgumentParser() + # args specified by the user: (all other will be loaded from the model) + add_base_options(parser) + add_sampling_options(parser) + add_edit_options(parser) + return parse_and_load_from_model(parser) + + +def evaluation_parser(): + parser = ArgumentParser() + # args specified by the user: (all other will be loaded from the model) + add_base_options(parser) + add_evaluation_options(parser) + return parse_and_load_from_model(parser) \ No newline at end of file diff --git a/motion_diffusion_model/utils/rotation_conversions.py b/motion_diffusion_model/utils/rotation_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..210ae1f0878b3ab223ec3d51d4053751dceb47ff --- /dev/null +++ b/motion_diffusion_model/utils/rotation_conversions.py @@ -0,0 +1,552 @@ +# This code is based on https://github.com/Mathux/ACTOR.git +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# Check PYTORCH3D_LICENCE before use + +import functools +from typing import Optional + +import torch +import torch.nn.functional as F + + +""" +The transformation matrices returned from the functions in this file assume +the points on which the transformation will be applied are column vectors. +i.e. the R matrix is structured as + + R = [ + [Rxx, Rxy, Rxz], + [Ryx, Ryy, Ryz], + [Rzx, Rzy, Rzz], + ] # (3, 3) + +This matrix can be applied to column vectors by post multiplication +by the points e.g. + + points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point + transformed_points = R * points + +To apply the same matrix to points which are row vectors, the R matrix +can be transposed and pre multiplied by the points: + +e.g. + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + transformed_points = points * R.transpose(1, 0) +""" + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +def _copysign(a, b): + """ + Return a tensor where each element has the absolute value taken from the, + corresponding element of a, with sign taken from the corresponding + element of b. This is like the standard copysign floating-point operation, + but is not careful about negative 0 and NaN. + + Args: + a: source tensor. + b: tensor whose signs will be used, of the same shape as a. + + Returns: + Tensor of the same shape as a with the signs of b. + """ + signs_differ = (a < 0) != (b < 0) + return torch.where(signs_differ, -a, a) + + +def _sqrt_positive_part(x): + """ + Returns torch.sqrt(torch.max(0, x)) + but with a zero subgradient where x is 0. + """ + ret = torch.zeros_like(x) + positive_mask = x > 0 + ret[positive_mask] = torch.sqrt(x[positive_mask]) + return ret + + +def matrix_to_quaternion(matrix): + """ + Convert rotations given as rotation matrices to quaternions. 
+ + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + quaternions with real part first, as tensor of shape (..., 4). + """ + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.") + m00 = matrix[..., 0, 0] + m11 = matrix[..., 1, 1] + m22 = matrix[..., 2, 2] + o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22) + x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22) + y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22) + z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22) + o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2]) + o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0]) + o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1]) + return torch.stack((o0, o1, o2, o3), -1) + + +def _axis_angle_rotation(axis: str, angle): + """ + Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = torch.cos(angle) + sin = torch.sin(angle) + one = torch.ones_like(angle) + zero = torch.zeros_like(angle) + + if axis == "X": + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + if axis == "Y": + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + if axis == "Z": + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + + return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + +def euler_angles_to_matrix(euler_angles, convention: str): + """ + Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError("Invalid input euler angles.") + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1)) + return functools.reduce(torch.matmul, matrices) + + +def _angle_from_tan( + axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool +): + """ + Extract the first or third Euler angle from the two members of + the matrix which are positive constant times its sine and cosine. + + Args: + axis: Axis label "X" or "Y or "Z" for the angle we are finding. + other_axis: Axis label "X" or "Y or "Z" for the middle axis in the + convention. + data: Rotation matrices as tensor of shape (..., 3, 3). + horizontal: Whether we are looking for the angle for the third axis, + which means the relevant entries are in the same row of the + rotation matrix. If not, they are in the same column. + tait_bryan: Whether the first and third axes in the convention differ. + + Returns: + Euler Angles in radians for each matrix in dataset as a tensor + of shape (...). 
+ """ + + i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis] + if horizontal: + i2, i1 = i1, i2 + even = (axis + other_axis) in ["XY", "YZ", "ZX"] + if horizontal == even: + return torch.atan2(data[..., i1], data[..., i2]) + if tait_bryan: + return torch.atan2(-data[..., i2], data[..., i1]) + return torch.atan2(data[..., i2], -data[..., i1]) + + +def _index_from_letter(letter: str): + if letter == "X": + return 0 + if letter == "Y": + return 1 + if letter == "Z": + return 2 + + +def matrix_to_euler_angles(matrix, convention: str): + """ + Convert rotations given as rotation matrices to Euler angles in radians. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + convention: Convention string of three uppercase letters. + + Returns: + Euler angles in radians as tensor of shape (..., 3). + """ + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.") + i0 = _index_from_letter(convention[0]) + i2 = _index_from_letter(convention[2]) + tait_bryan = i0 != i2 + if tait_bryan: + central_angle = torch.asin( + matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0) + ) + else: + central_angle = torch.acos(matrix[..., i0, i0]) + + o = ( + _angle_from_tan( + convention[0], convention[1], matrix[..., i2], False, tait_bryan + ), + central_angle, + _angle_from_tan( + convention[2], convention[1], matrix[..., i0, :], True, tait_bryan + ), + ) + return torch.stack(o, -1) + + +def random_quaternions( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate random quaternions representing rotations, + i.e. versors with nonnegative real part. + + Args: + n: Number of quaternions in a batch to return. + dtype: Type to return. + device: Desired device of returned tensor. Default: + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + + Returns: + Quaternions as tensor of shape (N, 4). + """ + o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad) + s = (o * o).sum(1) + o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None] + return o + + +def random_rotations( + n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate random rotations as 3x3 rotation matrices. + + Args: + n: Number of rotation matrices in a batch to return. + dtype: Type to return. + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type. + requires_grad: Whether the resulting tensor should have the gradient + flag set. + + Returns: + Rotation matrices as tensor of shape (n, 3, 3). + """ + quaternions = random_quaternions( + n, dtype=dtype, device=device, requires_grad=requires_grad + ) + return quaternion_to_matrix(quaternions) + + +def random_rotation( + dtype: Optional[torch.dtype] = None, device=None, requires_grad=False +): + """ + Generate a single random 3x3 rotation matrix. + + Args: + dtype: Type to return + device: Device of returned tensor. 
Default: if None, + uses the current device for the default tensor type + requires_grad: Whether the resulting tensor should have the gradient + flag set + + Returns: + Rotation matrix as tensor of shape (3, 3). + """ + return random_rotations(1, dtype, device, requires_grad)[0] + + +def standardize_quaternion(quaternions): + """ + Convert a unit quaternion to a standard form: one in which the real + part is non negative. + + Args: + quaternions: Quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Standardized quaternions as tensor of shape (..., 4). + """ + return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions) + + +def quaternion_raw_multiply(a, b): + """ + Multiply two quaternions. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + + Returns: + The product of a and b, a tensor of quaternions shape (..., 4). + """ + aw, ax, ay, az = torch.unbind(a, -1) + bw, bx, by, bz = torch.unbind(b, -1) + ow = aw * bw - ax * bx - ay * by - az * bz + ox = aw * bx + ax * bw + ay * bz - az * by + oy = aw * by - ax * bz + ay * bw + az * bx + oz = aw * bz + ax * by - ay * bx + az * bw + return torch.stack((ow, ox, oy, oz), -1) + + +def quaternion_multiply(a, b): + """ + Multiply two quaternions representing rotations, returning the quaternion + representing their composition, i.e. the versor with nonnegative real part. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + + Returns: + The product of a and b, a tensor of quaternions of shape (..., 4). + """ + ab = quaternion_raw_multiply(a, b) + return standardize_quaternion(ab) + + +def quaternion_invert(quaternion): + """ + Given a quaternion representing rotation, get the quaternion representing + its inverse. + + Args: + quaternion: Quaternions as tensor of shape (..., 4), with real part + first, which must be versors (unit quaternions). + + Returns: + The inverse, a tensor of quaternions of shape (..., 4). + """ + + return quaternion * quaternion.new_tensor([1, -1, -1, -1]) + + +def quaternion_apply(quaternion, point): + """ + Apply the rotation given by a quaternion to a 3D point. + Usual torch rules for broadcasting apply. + + Args: + quaternion: Tensor of quaternions, real part first, of shape (..., 4). + point: Tensor of 3D points of shape (..., 3). + + Returns: + Tensor of rotated points of shape (..., 3). + """ + if point.size(-1) != 3: + raise ValueError(f"Points are not in 3D, f{point.shape}.") + real_parts = point.new_zeros(point.shape[:-1] + (1,)) + point_as_quaternion = torch.cat((real_parts, point), -1) + out = quaternion_raw_multiply( + quaternion_raw_multiply(quaternion, point_as_quaternion), + quaternion_invert(quaternion), + ) + return out[..., 1:] + + +def axis_angle_to_matrix(axis_angle): + """ + Convert rotations given as axis/angle to rotation matrices. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle)) + + +def matrix_to_axis_angle(matrix): + """ + Convert rotations given as rotation matrices to axis/angle. 
+ + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + return quaternion_to_axis_angle(matrix_to_quaternion(matrix)) + + +def axis_angle_to_quaternion(axis_angle): + """ + Convert rotations given as axis/angle to quaternions. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + quaternions with real part first, as tensor of shape (..., 4). + """ + angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True) + half_angles = 0.5 * angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + quaternions = torch.cat( + [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1 + ) + return quaternions + + +def quaternion_to_axis_angle(quaternions): + """ + Convert rotations given as quaternions to axis/angle. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True) + half_angles = torch.atan2(norms, quaternions[..., :1]) + angles = 2 * half_angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + return quaternions[..., 1:] / sin_half_angles_over_angles + + +def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + """ + Converts 6D rotation representation by Zhou et al. [1] to rotation matrix + using Gram--Schmidt orthogonalisation per Section B of [1]. + Args: + d6: 6D rotation representation, of size (*, 6) + + Returns: + batch of rotation matrices of size (*, 3, 3) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + +def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor: + """ + Converts rotation matrices to 6D rotation representation by Zhou et al. [1] + by dropping the last row. Note that 6D representation is not unique. 
+ Args: + matrix: batch of rotation matrices of size (*, 3, 3) + + Returns: + 6D rotation representation, of size (*, 6) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6) diff --git a/motion_diffusion_model/utils/sampler_util.py b/motion_diffusion_model/utils/sampler_util.py new file mode 100644 index 0000000000000000000000000000000000000000..9531ddb9acc329aa281046c697687b28ffd9a64d --- /dev/null +++ b/motion_diffusion_model/utils/sampler_util.py @@ -0,0 +1,81 @@ +import numpy as np +import torch +import torch.nn as nn +from copy import deepcopy +from utils.misc import wrapped_getattr +import joblib + +# A wrapper model for Classifier-free guidance **SAMPLING** only +# https://arxiv.org/abs/2207.12598 +class ClassifierFreeSampleModel(nn.Module): + + def __init__(self, model): + super().__init__() + self.model = model # model is the actual model to run + + assert self.model.cond_mask_prob > 0, 'Cannot run a guided diffusion on a model that has not been trained with no conditions' + + # pointers to inner model + self.rot2xyz = self.model.rot2xyz + self.translation = self.model.translation + self.njoints = self.model.njoints + self.nfeats = self.model.nfeats + self.data_rep = self.model.data_rep + self.cond_mode = self.model.cond_mode + self.encode_text = self.model.encode_text + + def forward(self, x, timesteps, y=None): + cond_mode = self.model.cond_mode + assert cond_mode in ['text', 'action'] + y_uncond = deepcopy(y) + y_uncond['uncond'] = True + out = self.model(x, timesteps, y) + out_uncond = self.model(x, timesteps, y_uncond) + return out_uncond + (y['scale'].view(-1, 1, 1, 1) * (out - out_uncond)) + + def __getattr__(self, name, default=None): + # this method is reached only if name is not in self.__dict__. + return wrapped_getattr(self, name, default=None) + + +class AutoRegressiveSampler(): + def __init__(self, args, sample_fn, required_frames=196): + self.sample_fn = sample_fn + self.args = args + self.required_frames = required_frames + + def sample(self, model, shape, **kargs): + bs = shape[0] + n_iterations = (self.required_frames // self.args.pred_len) + int(self.required_frames % self.args.pred_len > 0) + samples_buf = [] + cur_prefix = deepcopy(kargs['model_kwargs']['y']['prefix']) # init with data + dynamic_text_mode = type(kargs['model_kwargs']['y']['text'][0]) == list # Text changes on the fly - prompt per prediction is provided as a list (instead of a single prompt) + if self.args.autoregressive_include_prefix: + samples_buf.append(cur_prefix) + autoregressive_shape = list(deepcopy(shape)) + autoregressive_shape[-1] = self.args.pred_len + + # Autoregressive sampling + for i in range(n_iterations): + + # Build the current kargs + cur_kargs = deepcopy(kargs) + cur_kargs['model_kwargs']['y']['prefix'] = cur_prefix + if dynamic_text_mode: + cur_kargs['model_kwargs']['y']['text'] = [s[i] for s in kargs['model_kwargs']['y']['text']] + if model.text_encoder_type == 'bert': + cur_kargs['model_kwargs']['y']['text_embed'] = (cur_kargs['model_kwargs']['y']['text_embed'][0][:, :, i], cur_kargs['model_kwargs']['y']['text_embed'][1][:, i]) + else: + raise NotImplementedError('DiP model only supports BERT text encoder at the moment. 
If you implement this, please send a PR!') + + # Sample the next prediction + sample = self.sample_fn(model, autoregressive_shape, **cur_kargs) + + # Buffer the sample + samples_buf.append(sample.clone()[..., -self.args.pred_len:]) + + # Update the prefix + cur_prefix = sample.clone()[..., -self.args.context_len:] + + full_batch = torch.cat(samples_buf, dim=-1)[..., :self.required_frames] # 200 -> 196 + return full_batch \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 5596dc66bc77eab1b44c12eb98482cf8d057a52d..25fd587ab318e5684db3ab42875bd32edf87cea7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ torch>=2.0 gradio -git+https://github.com/GuyTevet/motion-diffusion-model.git +numpy