""" Convert a single task folder with sequences into LIBERO-like demos with fields: actions, gripper_states, joint_states, robot_states, ee_states, agentview_images, eye_in_hand_images, agentview_depths, eye_in_hand_depths, agentview_segs, eye_in_hand_segs, agentview_boxes, eye_in_hand_boxes, rewards, dones Expected layout (per task): taskX/ success/ / camera_base.mp4 # agentview RGB camera_wrist.mp4 # eye-in-hand RGB trajectory.pkl # dict-like (see below) masks/ / masks/ 000000_id1.png, 000000_id2.png, 000001_id1.png, ... We infer T (timesteps) from trajectory.pkl (preferred keys: robot_gripper_pose, timestamp). We parse mask PNGs named "{frame:06d}_id{instance}.png" into a per-frame label map, and compute per-frame boxes per instance id. Trajectory .pkl keys (examples): ['robot_eef_pose', 'robot_eef_pose_vel', 'robot_joint', 'robot_joint_vel', 'robot_gripper_pose', 'timestamp', 'task_description'] Actions policy: - If 'robot_joint_vel' exists: actions = robot_joint_vel (T, DoF) - Else if 'robot_eef_pose_vel' exists: actions = robot_eef_pose_vel (T, 6/7) - Else: finite-difference of 'robot_joint' (pad last row with zeros). Depth and eye-in-hand segs: - If no depth available, we create zero arrays with the correct length and frame shape. - If only one set of masks exists (agentview), we mirror it to eye-in-hand segs for compatibility. Boxes: - Stored in metainfo JSON as lists of [x1,y1,x2,y2] per frame (pixel coords). Requires: numpy, opencv-python, h5py, pillow (PIL) """ import argparse, json, os, pickle, re, sys from dataclasses import dataclass from pathlib import Path from typing import List, Tuple, Dict, Sequence, Optional, Any import imageio import numpy as np import h5py import cv2 from PIL import Image MASK_RE = re.compile(r'^(?P\d+)_id(?P\d+)\.(?:png|jpg|jpeg|bmp)$', re.IGNORECASE) # ---------- helpers ---------- def _ensure_uint8_rgb(img: np.ndarray) -> np.ndarray: arr = np.asarray(img) if arr.ndim == 2: arr = np.stack([arr]*3, axis=-1) if arr.shape[-1] == 4: arr = arr[..., :3] if arr.dtype != np.uint8: if np.issubdtype(arr.dtype, np.floating) and arr.max() <= 1.0: arr = (arr * 255.0 + 0.5).astype(np.uint8) else: arr = np.clip(arr, 0, 255).astype(np.uint8) return arr def _label_to_color(label_map: np.ndarray, color_map: Optional[Dict[int, Tuple[int,int,int]]] = None): H, W = label_map.shape colored = np.zeros((H, W, 3), dtype=np.uint8) color_map = {} if color_map is None else dict(color_map) for lid in np.unique(label_map): if lid == 0: continue if lid not in color_map: rng = np.random.RandomState(lid * 9973 % (2**31-1)) color_map[lid] = tuple(int(x) for x in rng.randint(40, 220, size=3)) colored[label_map == lid] = color_map[lid] return colored, color_map def _overlay(rgb: np.ndarray, over_rgb: np.ndarray, alpha: float = 0.5) -> np.ndarray: out = (1.0 - alpha) * rgb.astype(np.float32) + alpha * over_rgb.astype(np.float32) return np.clip(out, 0, 255).astype(np.uint8) def _draw_bboxes(rgb: np.ndarray, bboxes: Sequence[Tuple[int, Sequence[int]]], color_map: Optional[Dict[int, Tuple[int,int,int]]] = None) -> np.ndarray: img = rgb.copy() color_map = {} if color_map is None else color_map defined_labels = {'id40': 'bottle 1', 'id20': 'bottle 2', 'id60': 'bowl 1', 'id100': 'robot', 'id80': 'bowl 1'} for seg_id, box in bboxes: x, y, x2, y2 = [int(v) for v in box] if seg_id not in color_map: rng = np.random.RandomState(seg_id * 9973 % (2**31-1)) color_map[seg_id] = tuple(int(x) for x in rng.randint(40, 220, size=3)) bgr = color_map[seg_id][::-1] cv2.rectangle(img, (x, y), (x2, y2), bgr, 

# ---------- main ----------
def save_annotation_video_imageio(
    agentview_images: List[np.ndarray],
    agentview_segs: List[np.ndarray],
    agentview_bboxes: List[List[Tuple[int, Sequence[int]]]],
    out_path: str,
    fps: int = 20,
    resize: Optional[Tuple[int, int]] = None,
    seg_alpha: float = 0.5,
    layout: str = "hstack",
) -> str:
    """Save annotated rollout video with raw | bbox | seg-overlay panels using imageio."""
    assert len(agentview_images) == len(agentview_segs) == len(agentview_bboxes)
    T = len(agentview_images)
    if T == 0:
        raise ValueError("No frames to render")

    imgs = [_ensure_uint8_rgb(f) for f in agentview_images]
    segs = [np.asarray(s, dtype=np.int32) for s in agentview_segs]

    H, W = imgs[0].shape[:2]
    if resize is not None:
        W, H = resize
        imgs = [cv2.resize(im, (W, H), interpolation=cv2.INTER_LINEAR) for im in imgs]
        segs = [cv2.resize(s, (W, H), interpolation=cv2.INTER_NEAREST) for s in segs]
    else:
        imgs = [cv2.resize(im, (W, H), interpolation=cv2.INTER_LINEAR) if im.shape[:2] != (H, W) else im
                for im in imgs]
        segs = [cv2.resize(s, (W, H), interpolation=cv2.INTER_NEAREST) if s.shape != (H, W) else s
                for s in segs]

    color_map: Dict[int, Tuple[int, int, int]] = {}

    def compose(t: int) -> np.ndarray:
        raw = imgs[t]
        box_img = _draw_bboxes(raw, agentview_bboxes[t], color_map=color_map)
        seg_col, cm2 = _label_to_color(segs[t], color_map=color_map)
        color_map.update(cm2)
        seg_overlay = _overlay(raw, seg_col, alpha=seg_alpha)
        if layout == "hstack":
            return np.concatenate([raw, box_img, seg_overlay], axis=1)
        else:  # grid
            top = np.concatenate([raw, box_img], axis=1)
            bot = np.concatenate([seg_overlay, seg_col], axis=1)
            return np.concatenate([top, bot], axis=0)

    # --- Use imageio.get_writer ---
    with imageio.get_writer(out_path, fps=fps, codec="libx264") as writer:
        for t in range(T):
            frame = compose(t)
            writer.append_data(frame)  # frame must be (H,W,3) uint8
    return out_path


def natural_key(s: str):
    return [int(t) if t.isdigit() else t.lower() for t in re.split(r"(\d+)", s)]


def process_gripper_pose(robot_gripper_pose):
    raw = np.array(robot_gripper_pose)  # shape (T,)
    # binary states (open=1, closed=0)
    state = raw.astype(np.int32)
    # deltas using the "previous" rule; with the default prev=-1,
    # states [1, 1, 0, 0, 1] -> deltas [-1, -1, -1, +1]
    delta = np.zeros_like(state[:-1])
    prev = -1
    for t in range(0, len(state) - 1):
        if state[t] != state[t + 1]:
            delta[t] = 1 if state[t] < state[t + 1] else -1
            prev = delta[t]
        else:
            delta[t] = prev  # carry forward previous action
    return delta


def process_video_rgb(path: Path) -> List[np.ndarray]:
    if cv2 is None:
        raise RuntimeError("OpenCV not available. Please install opencv-python.")
    cap = cv2.VideoCapture(str(path))
    if not cap.isOpened():
        raise RuntimeError(f"Cannot open video: {path}")
    frames = []
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # Resize to 256x256 and convert BGR->RGB
        frame = cv2.resize(frame, (256, 256), interpolation=cv2.INTER_LINEAR)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame)
    cap.release()
    return frames

def parse_masks_dir(H, W, masks_dir: Path) -> Dict[int, Dict[int, np.ndarray]]:
    """Return nested dict: frame_idx -> {inst_id -> binary mask (H,W,1)}."""
    out: Dict[int, Dict[int, np.ndarray]] = {}
    for f in sorted(masks_dir.iterdir(), key=lambda x: natural_key(x.name)):
        if not f.is_file():
            continue
        m = MASK_RE.match(f.name)
        if not m:
            continue
        frame = int(m.group("frame"))
        inst = int(m.group("inst"))
        arr = np.array(Image.open(f).convert("L").resize((W, H)))  # (H,W) grayscale
        bin_mask = (arr > 0).astype(np.uint8)[..., None]           # (H,W,1)
        out.setdefault(frame, {})[inst] = bin_mask
    return out


def labelmap_and_boxes(H, W, per_inst: Dict[int, np.ndarray]) -> Tuple[np.ndarray, List[List[int]]]:
    """
    From {inst_id -> (H,W,1) mask}, build a label map (H,W,1) with labels 20..K*20,
    and compute boxes as [x1,y1,x2,y2] for each instance (label>0), in order of inst_id.
    Returns (labelmap, boxes).
    """
    if not per_inst:
        return np.zeros((H, W, 1), dtype=np.int32), []
    labelmap = np.zeros((H, W, 1), dtype=np.int32)
    boxes: List[List[int]] = []
    # Sort instances for stable order
    for idx, inst_id in enumerate(sorted(per_inst.keys())):
        m = per_inst[inst_id][..., 0].astype(bool)
        label = (idx + 1) * 20  # 0 reserved as background
        labelmap[m] = label
        # Bounding box
        ys, xs = np.where(m)
        if len(xs) == 0 or len(ys) == 0:
            pass
        else:
            x1, x2 = int(xs.min()), int(xs.max())
            y1, y2 = int(ys.min()), int(ys.max())
            boxes.append([label, [x1, y1, x2, y2]])
    return labelmap, boxes


def detect_noops_with_gripper_window(
    actions: np.ndarray,
    gripper_col: int = -1,
    tol: float = 1e-6,
    window: int = 6,
):
    """
    Return (is_noop, active_gripper_window), two boolean vectors of length T.
    A step is a no-op if (a) all non-gripper dims are ~0 (|x| < tol), and
    (b) it is not within the `window` frames starting at a gripper state change.
    """
    a = np.asarray(actions)
    assert a.ndim == 2 and a.shape[0] > 0, "actions must be (T, D)"
    T, D = a.shape

    # 1) movement no-op: all non-gripper dims are near zero
    if gripper_col < 0:
        g_idx = D + gripper_col
    else:
        g_idx = gripper_col
    assert 0 <= g_idx < D
    if D > 1:
        move = np.concatenate([a[:, :g_idx], a[:, g_idx + 1:]], axis=1)
        movement_noop = np.all(np.abs(move) < tol, axis=1)
    else:
        movement_noop = np.ones(T, dtype=bool)  # only gripper present

    # 2) gripper activity window: detect state changes and mark window frames
    g = a[:, g_idx]
    # Convert to binary state: open=1, closed=0 (by sign/threshold)
    # Works for {-1,0,1} or continuous values (e.g., widths).
    state = (g > 0).astype(np.int8)
    # Change points where state flips
    changes = np.flatnonzero(np.diff(state, prepend=state[0]) != 0)
    active_gripper_window = np.zeros(T, dtype=bool)
    for t0 in changes:
        t1 = min(t0 + window, T)
        active_gripper_window[t0:t1] = True

    # Final no-op = movement_noop and NOT in gripper activity window
    is_noop = movement_noop & (~active_gripper_window)
    return is_noop, active_gripper_window
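
# Hedged sketch of the "Actions policy" described in the module docstring:
# joint velocities if present, otherwise EEF pose velocities, otherwise a
# finite difference of joint positions padded with a final zero row. Note that
# process_sequence() below instead builds actions from delta EEF poses plus a
# gripper delta; this helper is illustrative and not called by the pipeline.
def actions_from_trajectory(traj: Dict[str, Any]) -> np.ndarray:
    if "robot_joint_vel" in traj:
        return np.asarray(traj["robot_joint_vel"], dtype=np.float32)     # (T, DoF)
    if "robot_eef_pose_vel" in traj:
        return np.asarray(traj["robot_eef_pose_vel"], dtype=np.float32)  # (T, 6/7)
    joints = np.asarray(traj["robot_joint"], dtype=np.float32)           # (T, DoF)
    diff = np.diff(joints, axis=0)                                       # (T-1, DoF)
    return np.concatenate([diff, np.zeros((1, joints.shape[1]), dtype=np.float32)], axis=0)
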

def process_sequence(seq_name: str, task_dir: Path, out_dir: Path, sequence_rename: str):
    s_dir = task_dir / "success" / seq_name
    m_dir = task_dir / "masks" / seq_name / "masks"

    # --- Load trajectory ---
    pkl_path = s_dir / "trajectory.pkl"
    with open(pkl_path, "rb") as f:
        traj = pickle.load(f)
    task_description = traj['task_description'].lower().replace('.', '')

    # Actions: delta EEF pose between consecutive steps, plus a gripper open/close delta.
    T = len(traj['robot_eef_pose']) - 1
    delta_eef = traj['robot_eef_pose'][1:, :] - traj['robot_eef_pose'][:-1, :]
    delta_gripper = process_gripper_pose(traj['robot_gripper_pose'])
    delta_gripper = delta_gripper.reshape(T, 1)
    actions = np.concatenate([delta_eef, delta_gripper], axis=1)

    # --- Read videos as RGB ---
    base_vid = s_dir / "camera_base.mp4"
    agentview_images = process_video_rgb(base_vid)
    agentview_images = agentview_images[:T]
    H, W, _ = agentview_images[0].shape

    # --- Parse masks into label maps + boxes ---
    per_frame = parse_masks_dir(H, W, m_dir)
    agentview_segs = []
    agentview_bboxes = []
    for t, inst_dict in per_frame.items():
        if t >= T:
            continue
        labelmap, boxes = labelmap_and_boxes(H, W, inst_dict)
        if labelmap.size == 0:  # in case masks are missing
            continue
        if (labelmap.shape[0] != H) or (labelmap.shape[1] != W):
            # Resize nearest to match video shape
            labelmap = np.array(Image.fromarray(labelmap.astype(np.int32)).resize((W, H), resample=Image.NEAREST))
        agentview_segs.append(labelmap)
        agentview_bboxes.append(boxes)

    # Optional debug rendering / no-op detection:
    # save_annotation_video_imageio(
    #     agentview_images, agentview_segs, agentview_bboxes,
    #     out_path="annotations.mp4", fps=20, resize=(256,256)
    # )
    # is_noop, active_win = detect_noops_with_gripper_window(actions, gripper_col=-1, tol=1e-5, window=6)

    data = {
        "episode_key": sequence_rename,
        "agentview_images": agentview_images,
        "agentview_segs": agentview_segs,
        "agentview_boxes": agentview_bboxes,
        "actions": actions,
        "task_description": task_description,
    }
    return data

def write_episode(
    out_dir: str,
    task_name: str,
    episode: Dict[str, Any],
):
    """
    Expected episode dict:
    {
        "episode_key": "20250711-13h_52m_58s",
        "agentview_images": [...],      # list[(H,W,3) uint8]
        "agentview_segs": [...],        # list[(H,W,1) int]
        "agentview_boxes": [...],       # list[list[(id, [x1,y1,x2,y2])]]
        "actions": np.ndarray or None,  # (T,D)
        "task_description": "string",   # optional
    }
    """
    episode_key = episode["episode_key"]
    h5_filename = f"{task_name}_{episode_key}.hdf5"
    meta_filename = f"{task_name}_{episode_key}_metainfo.json"
    h5_path = os.path.join(out_dir, h5_filename)
    meta_path = os.path.join(out_dir, meta_filename)

    # Load or start the metainfo JSON for this episode
    if os.path.exists(meta_path):
        with open(meta_path, "r") as f:
            metainfo = json.load(f)
    else:
        metainfo = {task_name: {}}

    with h5py.File(h5_path, "a") as f:  # append if file already exists
        root = f.require_group("data")

        ep = episode
        episode_key = ep["episode_key"]
        agentview_images = ep["agentview_images"]
        agentview_segs = ep["agentview_segs"]
        agentview_boxes = ep["agentview_boxes"]
        actions = ep.get("actions", None)
        task_description = ep.get("task_description", "")

        # --- lengths & alignment ---
        lens = [len(agentview_images), len(agentview_segs), len(agentview_boxes)]
        if actions is not None:
            lens.append(len(actions))
        T = min(l for l in lens if l > 0)
        assert T > 0, f"[{episode_key}] nothing to write"

        agentview_images = agentview_images[:T]
        agentview_segs = agentview_segs[:T]
        agentview_boxes = agentview_boxes[:T]
        if actions is None:
            actions = np.zeros((T, 1), dtype=np.float32)
        else:
            actions = np.asarray(actions)[:T]

        # --- stack visuals ---
        agentview_rgb = np.stack(agentview_images, axis=0)  # (T,H,W,3)
        agentview_seg = np.stack([np.asarray(s, dtype=np.int32) for s in agentview_segs], axis=0)  # (T,H,W,1)
        _, H, W, _ = agentview_seg.shape

        # --- placeholders for missing streams/states ---
        eye_in_hand_rgb = np.zeros_like(agentview_rgb, dtype=np.uint8)
        agentview_depth = np.zeros((T, H, W), dtype=np.float32)
        eye_in_hand_depth = np.zeros((T, H, W), dtype=np.float32)
        eye_in_hand_seg = np.zeros((T, H, W), dtype=np.int32)

        gripper_states = np.zeros((T, 1), dtype=np.float32)
        joint_states = np.zeros((T, 0), dtype=np.float32)
        ee_states = np.zeros((T, 6), dtype=np.float32)  # [pos(3), ori(3)]
        robot_states = np.zeros((T, 0), dtype=np.float32)
        dones = np.zeros(T, dtype=np.uint8); dones[-1] = 1
        rewards = np.zeros(T, dtype=np.uint8); rewards[-1] = 1

        # --- create / overwrite episode group ---
        if episode_key in root:
            del root[episode_key]  # clean if re-writing
        ep_grp = root.create_group(episode_key)
        obs_grp = ep_grp.create_group("obs")

        # states
        obs_grp.create_dataset("gripper_states", data=gripper_states)
        obs_grp.create_dataset("joint_states", data=joint_states)
        obs_grp.create_dataset("ee_states", data=ee_states)
        obs_grp.create_dataset("ee_pos", data=ee_states[:, :3])
        obs_grp.create_dataset("ee_ori", data=ee_states[:, 3:])

        # visuals
        obs_grp.create_dataset("agentview_rgb", data=agentview_rgb)
        obs_grp.create_dataset("eye_in_hand_rgb", data=eye_in_hand_rgb)
        obs_grp.create_dataset("agentview_depth", data=agentview_depth)
        obs_grp.create_dataset("eye_in_hand_depth", data=eye_in_hand_depth)
        obs_grp.create_dataset("agentview_seg", data=agentview_seg)
        obs_grp.create_dataset("eye_in_hand_seg", data=eye_in_hand_seg)

        # top-level (episode)
        ep_grp.create_dataset("actions", data=actions)
        ep_grp.create_dataset("robot_states", data=robot_states)
        ep_grp.create_dataset("rewards", data=rewards)
        ep_grp.create_dataset("dones", data=dones)

        # --- update metainfo JSON for this episode ---
        if task_name not in metainfo:
            metainfo[task_name] = {}
        if episode_key not in metainfo[task_name]:
            metainfo[task_name][episode_key] = {}
        metainfo[task_name][episode_key].update({
            "success": True,
            "initial_state": robot_states[0].tolist() if len(robot_states) else [],
            "task_nouns": [],  # fill if you want
            "task_description": task_description,
            "exo_boxes": agentview_boxes,  # per-frame boxes you provided
            "ego_boxes": [[] for _ in range(T)],  # none available
        })

    # write/merge metainfo once at the end
    with open(meta_path, "w") as f:
        json.dump(metainfo, f, indent=2)

    return {"hdf5": h5_path, "metainfo": meta_path}
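
# Hedged sketch: reading one converted episode back from disk. The path and
# episode key are placeholders; the group layout (data/<episode_key>/obs/...,
# data/<episode_key>/actions, ...) mirrors what write_episode() creates above.
def read_episode(h5_path: str, episode_key: str) -> Dict[str, np.ndarray]:
    with h5py.File(h5_path, "r") as f:
        ep = f["data"][episode_key]
        return {
            "agentview_rgb": ep["obs"]["agentview_rgb"][()],  # (T,H,W,3) uint8
            "agentview_seg": ep["obs"]["agentview_seg"][()],  # (T,H,W,1) int32
            "actions": ep["actions"][()],                     # (T,D)
            "dones": ep["dones"][()],                         # (T,) uint8
        }
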

def main():
    p = argparse.ArgumentParser(description="Convert sequences to LIBERO-like demos.")
    p.add_argument("--task_dir", type=str, required=True,
                   help="Path to task folder (contains success/ and masks/).")
    p.add_argument("--out_root", type=str, required=True,
                   help="Target directory where <task>_<episode>.hdf5 files are written.")
    args = p.parse_args()

    task_dir = Path(args.task_dir).expanduser().resolve()
    task_name = task_dir.name
    out_root = Path(args.out_root).expanduser().resolve()
    out_root.mkdir(parents=True, exist_ok=True)

    success_dir = task_dir / "success"
    masks_dir = task_dir / "masks"
    if not success_dir.is_dir() or not masks_dir.is_dir():
        print("[ERROR] task_dir must contain 'success/' and 'masks/'")
        sys.exit(1)

    # Only convert sequences that exist in both success/ and masks/.
    success_seqs = {d.name for d in success_dir.iterdir() if d.is_dir()}
    mask_seqs = {d.name for d in masks_dir.iterdir() if d.is_dir()}
    seqs = sorted(list(success_seqs & mask_seqs), key=natural_key)

    results = []
    from tqdm import tqdm
    for i, name in tqdm(enumerate(seqs), total=len(seqs)):
        info = process_sequence(name, task_dir, out_root, sequence_rename=f'demo_{i+1}')
        write_episode(
            out_dir=args.out_root,
            task_name=info['task_description'],
            episode=info,
        )

    # # Write a small manifest JSON
    # manifest = {"task": task_name, "outputs": results}
    # (out_root / f"{task_name}_manifest.json").write_text(json.dumps(manifest, indent=2))
    # print(f"[DONE] Manifest saved to {out_root / (task_name + '_manifest.json')}")


if __name__ == "__main__":
    main()
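
# Example invocation (hedged; the script filename is a placeholder):
#   python convert_task.py --task_dir /path/to/taskX --out_root /path/to/out
# For every sequence present in both taskX/success/ and taskX/masks/, this writes
# <task_description>_demo_<i>.hdf5 plus <task_description>_demo_<i>_metainfo.json
# into --out_root.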