zxooh46@uni-tuebingen.de committed
Commit b67123c · 1 Parent(s): feeac65

Push frame visualization
visualization/frames/plot_frames.py ADDED
@@ -0,0 +1,167 @@
import json
import argparse
import concurrent.futures
from pathlib import Path

import cv2
import numpy as np
import webdataset as wds
from huggingface_hub import HfFileSystem, get_token, hf_hub_url

# Background thread pool so JPEG encoding/writing does not block the main loop.
executor = concurrent.futures.ThreadPoolExecutor(
    max_workers=None,
    thread_name_prefix="JPG_Saver"
)

# Resolve every frame shard in the dataset repo and stream them all through a
# single authenticated curl pipe ('::' separates shard URLs for WebDataset).
fs = HfFileSystem()
files = [fs.resolve_path(path) for path in fs.glob("hf://datasets/CVML-TueAI/grounding-YT-dataset/frames/*.tar")]
urls = [hf_hub_url(file.repo_id, file.path_in_repo, repo_type="dataset") for file in files]
urls = f"pipe: curl -s -L -H 'Authorization:Bearer {get_token()}' {'::'.join(urls)}"

PRED_FILE = 'random_preds.json'  # example predictions file added in this commit
OUTPUT_DIR = Path('./output_annotations')
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
def save_annotated_frame(image_array_rgb, bbox, point, gt_action, pred_action, output_path):
    """Draw the GT box, predicted point, and action labels onto one frame and save it."""
    COLOR_GT = (0, 150, 0)     # Green (BGR)
    COLOR_PRED = (0, 0, 255)   # Red (BGR)
    COLOR_BOX = (255, 0, 0)    # Blue (BGR)
    COLOR_POINT = (0, 0, 255)  # Red (BGR)

    if gt_action == pred_action:
        COLOR_PRED = (0, 150, 0)  # Render the prediction in green when it is correct

    TOP_PADDING = 70  # Pixels added above the frame for the text header
    TEXT_OFFSET_X = 10

    image_bgr = cv2.cvtColor(image_array_rgb, cv2.COLOR_RGB2BGR)
    h, w = image_bgr.shape[:2]

    # White header strip on top, original frame below it.
    final_image = np.full((h + TOP_PADDING, w, 3), 255, dtype=np.uint8)
    final_image[TOP_PADDING : h + TOP_PADDING, 0:w] = image_bgr

    cv2.putText(
        final_image,
        f"Ground Truth: {gt_action}",
        (TEXT_OFFSET_X, 30),       # Position (x, y)
        cv2.FONT_HERSHEY_SIMPLEX,  # Font
        0.8,                       # Font scale
        COLOR_GT,                  # Color
        2                          # Thickness
    )
    cv2.putText(
        final_image,
        f"Prediction: {str(pred_action)}",  # str() because pred_action can be None
        (TEXT_OFFSET_X, 60),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.8,
        COLOR_PRED,
        2
    )

    # Ground-truth bounding box, shifted down by the header height.
    x_min, y_min, x_max, y_max = [int(coord) for coord in bbox]
    pt1 = (x_min, y_min + TOP_PADDING)  # Top-left corner
    pt2 = (x_max, y_max + TOP_PADDING)  # Bottom-right corner
    cv2.rectangle(final_image, pt1, pt2, COLOR_BOX, thickness=2)

    # Predicted point, also shifted down by the header height.
    a, b = (int(c) for c in point)
    pt_center = (a, b + TOP_PADDING)

    # Filled dot
    cv2.circle(final_image, pt_center, radius=3, color=COLOR_POINT, thickness=-1)
    # White outer circle for visibility
    cv2.circle(final_image, pt_center, radius=10, color=(255, 255, 255), thickness=2)

    # cv2.imwrite expects a string path, so convert from pathlib.Path.
    cv2.imwrite(str(output_path), final_image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
    print(f"Saved annotated image to {output_path}")
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--predictions", type=str, required=True,
        help="Path to a JSON file with predictions for each clip (e.g. random_preds.json)"
    )
    args = parser.parse_args()

    with open(args.predictions, 'r', encoding='utf-8') as f:
        preds = json.load(f)

    # Stream the shards; each sample yields its key, JPEG frame, and JSON metadata.
    dataset = (
        wds.WebDataset(urls, shardshuffle=False)
        .decode('torchrgb')
        .to_tuple("__key__", "jpg", "json")
    )

    for key, image_tensor, meta in dataset:

        frame_no = meta['frame']  # int
        video_name = meta['video']
        if preds.get(key) is not None:  # a prediction exists for this clip

            frame_pred = preds[key].get(str(frame_no)) or {}  # guard: frame may be absent
            pred_point = frame_pred.get('point')
            pred_action = frame_pred.get('action')
            if pred_point is None:
                continue  # nothing to draw without a predicted point

            # decode('torchrgb') yields a [C, H, W] float tensor in [0, 1];
            # convert to an [H, W, C] uint8 array for OpenCV.
            image_hwc = image_tensor.permute(1, 2, 0)
            image_scaled = image_hwc * 255.0
            image_numpy_uint8 = image_scaled.numpy().astype(np.uint8)

            output_dir = OUTPUT_DIR / 'frames' / video_name
            output_dir.mkdir(parents=True, exist_ok=True)
            output_img = output_dir / f'{key}.jpg'

            # Hand drawing and disk I/O off to the background pool.
            executor.submit(
                save_annotated_frame,
                image_array_rgb=image_numpy_uint8,
                bbox=meta['box'],
                point=pred_point,
                gt_action=meta['step_name'],
                pred_action=pred_action,
                output_path=output_img
            )

    print("Main loop finished. Waiting for file saving to complete...")
    executor.shutdown(wait=True)
    print("All files saved.")


if __name__ == '__main__':
    main()
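For reference, a typical invocation would look as follows (illustrative; it assumes a Hugging Face token is already configured, e.g. via huggingface-cli login, so the authenticated curl pipe can stream the shards):

python visualization/frames/plot_frames.py --predictions random_preds.json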
visualization/frames/random_preds.json ADDED
The diff for this file is too large to render.
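Although the diff is too large to render, the access pattern in plot_frames.py (preds[key][str(frame_no)]['point'] and ['action']) implies the predictions JSON is keyed by sample key, then by frame number. A rough sketch of that structure, with illustrative keys and values not taken from the actual file:

{
  "some_sample_key": {
    "17": {
      "point": [512, 288],
      "action": "predicted step name for this frame"
    }
  }
}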