TrieTran committed on
Commit c278bb8 · verified · 1 Parent(s): 041af29

Upload folder using huggingface_hub

Files changed (24)
  1. .gitattributes +4 -0
  2. rlds_builder/Real_Data/CITATIONS.bib +1 -0
  3. rlds_builder/Real_Data/Real_Data/real_data/1.0.0/dataset_info.json +23 -0
  4. rlds_builder/Real_Data/Real_Data/real_data/1.0.0/dataset_statistics_a9dbd4ee3dcfde17e90cbcdad0aed828455451659e0761bd7ad79e36836077d4.json +1 -0
  5. rlds_builder/Real_Data/Real_Data/real_data/1.0.0/features.json +168 -0
  6. rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00000-of-00004 +3 -0
  7. rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00001-of-00004 +3 -0
  8. rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00002-of-00004 +3 -0
  9. rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00003-of-00004 +3 -0
  10. rlds_builder/Real_Data/Real_Data_dataset_builder.py +278 -0
  11. rlds_builder/Real_Data/__init__.py +0 -0
  12. rlds_builder/Real_Data/__pycache__/LIBERO_90_dataset_builder.cpython-310.pyc +0 -0
  13. rlds_builder/Real_Data/__pycache__/LIBERO_90_dataset_builder.cpython-39.pyc +0 -0
  14. rlds_builder/Real_Data/__pycache__/LIBERO_Mem_dataset_builder.cpython-39.pyc +0 -0
  15. rlds_builder/Real_Data/__pycache__/LIBERO_Relation_dataset_builder.cpython-310.pyc +0 -0
  16. rlds_builder/Real_Data/__pycache__/LIBERO_Relation_dataset_builder.cpython-39.pyc +0 -0
  17. rlds_builder/Real_Data/__pycache__/LIBERO_Spatial_ol_dataset_builder.cpython-39.pyc +0 -0
  18. rlds_builder/Real_Data/__pycache__/Real_Data_dataset_builder.cpython-310.pyc +0 -0
  19. rlds_builder/Real_Data/__pycache__/__init__.cpython-310.pyc +0 -0
  20. rlds_builder/Real_Data/__pycache__/__init__.cpython-39.pyc +0 -0
  21. rlds_builder/Real_Data/__pycache__/conversion_utils.cpython-310.pyc +0 -0
  22. rlds_builder/Real_Data/__pycache__/conversion_utils.cpython-39.pyc +0 -0
  23. rlds_builder/Real_Data/conversion_utils.py +226 -0
  24. rlds_builder/Real_Data/example.png +3 -0
.gitattributes CHANGED
@@ -57,3 +57,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00000-of-00004 filter=lfs diff=lfs merge=lfs -text
+ rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00001-of-00004 filter=lfs diff=lfs merge=lfs -text
+ rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00002-of-00004 filter=lfs diff=lfs merge=lfs -text
+ rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00003-of-00004 filter=lfs diff=lfs merge=lfs -text
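These four entries route the TFRecord shards through Git LFS, so only lightweight pointer files live in the Git history. huggingface_hub writes one entry per file; the same effect could be achieved by hand with a single pattern, e.g. `git lfs track "rlds_builder/**/*.tfrecord-*"` (a hypothetical pattern, not part of this commit).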
rlds_builder/Real_Data/CITATIONS.bib ADDED
@@ -0,0 +1 @@
+ // TODO(example_dataset): BibTeX citation
rlds_builder/Real_Data/Real_Data/real_data/1.0.0/dataset_info.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "citation": "// TODO(example_dataset): BibTeX citation",
+   "fileFormat": "tfrecord",
+   "moduleName": "Real_Data.Real_Data_dataset_builder",
+   "name": "real_data",
+   "releaseNotes": {
+     "1.0.0": "Initial release."
+   },
+   "splits": [
+     {
+       "filepathTemplate": "{DATASET}-{SPLIT}.{FILEFORMAT}-{SHARD_X_OF_Y}",
+       "name": "train",
+       "numBytes": "477887299",
+       "shardLengths": [
+         "38",
+         "37",
+         "37",
+         "38"
+       ]
+     }
+   ],
+   "version": "1.0.0"
+ }
rlds_builder/Real_Data/Real_Data/real_data/1.0.0/dataset_statistics_a9dbd4ee3dcfde17e90cbcdad0aed828455451659e0761bd7ad79e36836077d4.json ADDED
@@ -0,0 +1 @@
+ {"action": {"mean": [-0.00021713033493142575, 3.951489816245157e-06, -6.244335236260667e-05, 0.024315927177667618, -0.024200621992349625, 0.0001426006929250434, -0.1762954145669937], "std": [0.005905783269554377, 0.010097788646817207, 0.005760197062045336, 0.9473578929901123, 0.9439426064491272, 0.028618143871426582, 0.9843310117721558], "max": [0.0263775996863842, 0.02954130433499813, 0.02553696744143963, 4.978769779205322, 5.3642754554748535, 0.4048313796520233, 1.0], "min": [-0.029638897627592087, -0.029703686013817787, -0.024002285674214363, -4.92792272567749, -5.274268627166748, -0.44714170694351196, -1.0], "q01": [-0.018638468496501446, -0.0258210021071136, -0.012519038049504161, -4.446948285102844, -4.433915729522705, -0.08356364756822586, -1.0], "q99": [0.017138871438801287, 0.025166765898466083, 0.02048220705240963, 4.4373928689956665, 4.42347291469574, 0.08423277527093823, 1.0]}, "proprio": {"mean": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "std": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "max": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "min": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "q01": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "q99": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}, "num_transitions": 20148, "num_trajectories": 150}
rlds_builder/Real_Data/Real_Data/real_data/1.0.0/features.json ADDED
@@ -0,0 +1,168 @@
+ {
+   "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
+   "featuresDict": {
+     "features": {
+       "episode_metadata": {
+         "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
+         "featuresDict": {
+           "features": {
+             "file_path": {
+               "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
+               "text": {},
+               "description": "Path to the original data file."
+             }
+           }
+         }
+       },
+       "steps": {
+         "pythonClassName": "tensorflow_datasets.core.features.dataset_feature.Dataset",
+         "sequence": {
+           "feature": {
+             "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
+             "featuresDict": {
+               "features": {
+                 "is_first": {
+                   "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                   "tensor": {
+                     "shape": {},
+                     "dtype": "bool",
+                     "encoding": "none"
+                   },
+                   "description": "True on first step of the episode."
+                 },
+                 "action": {
+                   "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
+                   "tensor": {
+                     "shape": {
+                       "dimensions": [
+                         "7"
+                       ]
+                     },
+                     "dtype": "float32",
+                     "encoding": "none"
+                   },
+                   "description": "Robot EEF action."
+                 },
+                 "is_last": {
+                   "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                   "tensor": {
+                     "shape": {},
+                     "dtype": "bool",
+                     "encoding": "none"
+                   },
+                   "description": "True on last step of the episode."
+                 },
+                 "is_terminal": {
+                   "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                   "tensor": {
+                     "shape": {},
+                     "dtype": "bool",
+                     "encoding": "none"
+                   },
+                   "description": "True on last step of the episode if it is a terminal step, True for demos."
+                 },
+                 "language_instruction": {
+                   "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
+                   "text": {},
+                   "description": "Language Instruction."
+                 },
+                 "discount": {
+                   "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                   "tensor": {
+                     "shape": {},
+                     "dtype": "float32",
+                     "encoding": "none"
+                   },
+                   "description": "Discount if provided, default to 1."
+                 },
+                 "observation": {
+                   "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict",
+                   "featuresDict": {
+                     "features": {
+                       "joint_state": {
+                         "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
+                         "tensor": {
+                           "shape": {
+                             "dimensions": [
+                               "7"
+                             ]
+                           },
+                           "dtype": "float32",
+                           "encoding": "none"
+                         },
+                         "description": "Robot joint angles."
+                       },
+                       "state": {
+                         "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
+                         "tensor": {
+                           "shape": {
+                             "dimensions": [
+                               "8"
+                             ]
+                           },
+                           "dtype": "float32",
+                           "encoding": "none"
+                         },
+                         "description": "Robot EEF state (6D pose, 2D gripper)."
+                       },
+                       "image": {
+                         "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image",
+                         "image": {
+                           "shape": {
+                             "dimensions": [
+                               "256",
+                               "256",
+                               "3"
+                             ]
+                           },
+                           "dtype": "uint8",
+                           "encodingFormat": "jpeg"
+                         },
+                         "description": "Main camera RGB observation."
+                       },
+                       "image_reasoning": {
+                         "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
+                         "text": {},
+                         "description": "scene objects as dictionary of bbox and seg index in Main camera."
+                       },
+                       "image_seg": {
+                         "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image",
+                         "image": {
+                           "shape": {
+                             "dimensions": [
+                               "256",
+                               "256",
+                               "1"
+                             ]
+                           },
+                           "dtype": "uint8",
+                           "encodingFormat": "png"
+                         },
+                         "description": "Main camera segmentation observation."
+                       }
+                     }
+                   }
+                 },
+                 "reward": {
+                   "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                   "tensor": {
+                     "shape": {},
+                     "dtype": "float32",
+                     "encoding": "none"
+                   },
+                   "description": "Reward if provided, 1 on final step for demos."
+                 },
+                 "language_instruction_nouns": {
+                   "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
+                   "text": {},
+                   "description": "Language Instruction Nouns."
+                 }
+               }
+             }
+           },
+           "length": "-1"
+         }
+       }
+     }
+   }
+ }
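For reference, a minimal sketch (not part of this commit) of loading the prepared dataset from the 1.0.0 directory above and checking it against this feature spec:

    import tensorflow_datasets as tfds

    # assumes the relative path below points at the prepared version directory
    builder = tfds.builder_from_directory("rlds_builder/Real_Data/Real_Data/real_data/1.0.0")
    print(builder.info.features)  # should mirror features.json above
    ds = builder.as_dataset(split="train")
    for episode in ds.take(1):
        for step in episode["steps"].take(1):
            print(step["observation"]["image"].shape)  # (256, 256, 3)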
rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00000-of-00004 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c2c72cd6ec1b4753008a17d1e37a979a19c69bb8115a121a36f2c30ae4ceb3d
+ size 117086720
rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00001-of-00004 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db0a232256d8800e4cb3acfc26e099d046bb4ad8a5086dfa3f1c2a408dc8f192
+ size 119840374
rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00002-of-00004 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34d2c730e7b28cc9ebb0d22169141bbe0aa427252d0966d2b21bd5f430d1fc77
+ size 115451476
rlds_builder/Real_Data/Real_Data/real_data/1.0.0/real_data-train.tfrecord-00003-of-00004 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65c3a9252fbc169e3d40bce342525f3d6e123d33b6b8a2a021accb9f092d7beb
+ size 125511129
rlds_builder/Real_Data/Real_Data_dataset_builder.py ADDED
@@ -0,0 +1,278 @@
+ from typing import Iterator, Tuple, Any
+
+ import os
+ import glob
+ import json
+ import h5py
+ import numpy as np
+ import tensorflow as tf
+ import tensorflow_datasets as tfds
+ from Real_Data.conversion_utils import MultiThreadedDatasetBuilder
+
+
+ def xyxy_to_cxcywh(box):
+     """
+     Convert [x_min, y_min, x_max, y_max] → [cx, cy, w, h].
+
+     Args:
+         box (list, tuple, or np.ndarray): shape (4,) or (N, 4).
+     Returns:
+         np.ndarray: same shape, but converted.
+     """
+     box = np.array(box, dtype=float)
+
+     if box.ndim == 1:  # single box
+         x_min, y_min, x_max, y_max = box
+         w = x_max - x_min
+         h = y_max - y_min
+         cx = x_min + w / 2
+         cy = y_min + h / 2
+         return np.array([cx, cy, w, h])
+
+     elif box.ndim == 2:  # multiple boxes
+         x_min, y_min, x_max, y_max = box[:, 0], box[:, 1], box[:, 2], box[:, 3]
+         w = x_max - x_min
+         h = y_max - y_min
+         cx = x_min + w / 2
+         cy = y_min + h / 2
+         return np.stack([cx, cy, w, h], axis=-1)
+
+     else:
+         raise ValueError("Input must be shape (4,) or (N, 4)")
+
+
+ def convert2texts(reasoning, image_size=256):
+     """Serialize per-step object annotations into '#'-joined text records."""
+     revised_reasoning = []
+     for datum in reasoning:
+         datum_reasoning = []
+         for item in datum:
+             if len(item) == 2:
+                 seg_ind, bbox = item
+                 bbox = xyxy_to_cxcywh(np.array(bbox)).astype(np.int32)
+                 re_key_values = f'object_{seg_ind}:' + str(seg_ind) + ',' + str(bbox) + ',' + str(0)
+                 datum_reasoning.append(re_key_values)
+         revised_reasoning.append('#'.join(datum_reasoning))
+     return revised_reasoning
+
+
+ def _generate_examples(paths, split, ratio=1.0) -> Iterator[Tuple[str, Any]]:
+     """Yields episodes for a list of data paths."""
+     # Any heavy per-worker state (e.g. a model) must be created *inside*
+     # _generate_examples so that each worker creates its own copy; creating
+     # one shared model outside this function would cause a deadlock.
+
+     def _parse_example(episode_path, episode_meta_data, demo_id):
+         # load raw data
+         with h5py.File(episode_path, "r") as F:
+             if f"demo_{demo_id}" not in F['data'].keys():
+                 return None  # skip episode if the demo doesn't exist (e.g. due to a failed demo)
+             actions = F['data'][f"demo_{demo_id}"]["actions"][()]
+             states = F['data'][f"demo_{demo_id}"]["obs"]["ee_states"][()]
+             gripper_states = F['data'][f"demo_{demo_id}"]["obs"]["gripper_states"][()]
+             joint_states = F['data'][f"demo_{demo_id}"]["obs"]["joint_states"][()]
+             images = F['data'][f"demo_{demo_id}"]["obs"]["agentview_rgb"][()]
+             # the wrist, depth, and wrist-segmentation views below are loaded
+             # but not serialized into the RLDS episodes in this version
+             wrist_images = F['data'][f"demo_{demo_id}"]["obs"]["eye_in_hand_rgb"][()]
+             depth_images = F['data'][f"demo_{demo_id}"]["obs"]["agentview_depth"][()]
+             depth_wrist_images = F['data'][f"demo_{demo_id}"]["obs"]["eye_in_hand_depth"][()]
+             seg_images = F['data'][f"demo_{demo_id}"]["obs"]["agentview_seg"][()]
+             seg_wrist_images = F['data'][f"demo_{demo_id}"]["obs"]["eye_in_hand_seg"][()]
+
+         # language instruction and object annotations come from the metadata file
+         object_data = episode_meta_data[f"demo_{demo_id}"]
+         image_reasonings = convert2texts(object_data['exo_boxes'])
+         wrist_image_reasonings = convert2texts(object_data['ego_boxes'])
+         command_nouns = '. '.join(object_data['task_nouns'])
+         command = object_data['task_description']
+
+         # assemble episode --> here we're assuming demos, so we set reward to 1 at the end
+         episode = []
+         for i in range(actions.shape[0]):
+             episode.append({
+                 'observation': {
+                     'image': images[i].astype(np.uint8),
+                     'image_seg': seg_images[i].astype(np.uint8),
+                     'image_reasoning': image_reasonings[i],  # object_1:@segid,@bbox#object_2:@segid,@bbox
+                     'state': np.asarray(np.concatenate((states[i], gripper_states[i], gripper_states[i]), axis=-1), np.float32),
+                     'joint_state': np.asarray(np.zeros(7), dtype=np.float32),
+                 },
+                 'action': np.asarray(actions[i], dtype=np.float32),
+                 'discount': 1.0,
+                 'reward': float(i == (actions.shape[0] - 1)),
+                 'is_first': i == 0,
+                 'is_last': i == (actions.shape[0] - 1),
+                 'is_terminal': i == (actions.shape[0] - 1),
+                 'language_instruction': command,
+                 'language_instruction_nouns': command_nouns,
+             })
+
+         # create output data sample
+         sample = {
+             'steps': episode,
+             'episode_metadata': {
+                 'file_path': episode_path
+             }
+         }
+
+         # if you want to skip an example for whatever reason, simply return None
+         return episode_path + f"_{demo_id}", sample
+
+     # load the metadata file that accompanies the hdf5 files
+     meta_path = os.path.join(os.path.dirname(paths[0]), 'metainfo.json')
+     with open(meta_path, 'r') as file:
+         meta_data = json.load(file)
+
+     # for smallish datasets, use single-thread parsing
+     for sample in paths:
+         with h5py.File(sample, "r") as F:
+             sample_demo_ids = [key.replace('demo_', '') for key in F['data'].keys()]
+
+         task_name = os.path.basename(sample)[:-10]  # strip the 10-character file suffix
+         task_meta_data = meta_data[task_name]
+         task_meta_demo_ids = [key.replace('demo_', '') for key in task_meta_data.keys()]
+         demo_ids = list(set(sample_demo_ids) & set(task_meta_demo_ids))
+         n_demos = len(demo_ids)
+
+         # the first tv_splitpoint demos go to 'train', the remainder to 'val'
+         idx = 0
+         tv_splitpoint = int(ratio * n_demos)
+         while idx < n_demos:
+             ret = _parse_example(sample, task_meta_data, demo_ids[idx])
+             assert ret is not None
+             idx += 1
+             if (split == 'train') and (idx > tv_splitpoint):
+                 continue
+             elif (split == 'val') and (idx <= tv_splitpoint):
+                 continue
+             yield ret
+
+
+ class RealData(MultiThreadedDatasetBuilder):
+     """DatasetBuilder for example dataset."""
+
+     VERSION = tfds.core.Version('1.0.0')
+     RELEASE_NOTES = {
+         '1.0.0': 'Initial release.',
+     }
+     N_WORKERS = 2             # number of parallel workers for data conversion
+     MAX_PATHS_IN_MEMORY = 60  # number of paths converted & stored in memory before writing to disk
+                               # -> the higher, the faster / more parallel the conversion; adjust based on available RAM
+                               #    note that one path may yield multiple episodes -- adjust accordingly
+     PARSE_FCN = _generate_examples  # handle to parse function from file paths to RLDS episodes
+
+     def _info(self) -> tfds.core.DatasetInfo:
+         """Dataset metadata (homepage, citation, ...)."""
+         return self.dataset_info_from_configs(
+             features=tfds.features.FeaturesDict({
+                 'steps': tfds.features.Dataset({
+                     'observation': tfds.features.FeaturesDict({
+                         'image': tfds.features.Image(
+                             shape=(256, 256, 3),
+                             dtype=np.uint8,
+                             encoding_format='jpeg',
+                             doc='Main camera RGB observation.',
+                         ),
+                         # segmentation
+                         'image_seg': tfds.features.Image(
+                             shape=(256, 256, 1),
+                             dtype=np.uint8,
+                             encoding_format='png',
+                             doc='Main camera segmentation observation.',
+                         ),
+                         # object-centric bboxes and seg indices
+                         'image_reasoning': tfds.features.Text(
+                             doc='scene objects as dictionary of bbox and seg index in Main camera.'
+                         ),
+                         'state': tfds.features.Tensor(
+                             shape=(8,),
+                             dtype=np.float32,
+                             doc='Robot EEF state (6D pose, 2D gripper).',
+                         ),
+                         'joint_state': tfds.features.Tensor(
+                             shape=(7,),
+                             dtype=np.float32,
+                             doc='Robot joint angles.',
+                         ),
+                     }),
+                     'action': tfds.features.Tensor(
+                         shape=(7,),
+                         dtype=np.float32,
+                         doc='Robot EEF action.',
+                     ),
+                     'discount': tfds.features.Scalar(
+                         dtype=np.float32,
+                         doc='Discount if provided, default to 1.'
+                     ),
+                     'reward': tfds.features.Scalar(
+                         dtype=np.float32,
+                         doc='Reward if provided, 1 on final step for demos.'
+                     ),
+                     'is_first': tfds.features.Scalar(
+                         dtype=np.bool_,
+                         doc='True on first step of the episode.'
+                     ),
+                     'is_last': tfds.features.Scalar(
+                         dtype=np.bool_,
+                         doc='True on last step of the episode.'
+                     ),
+                     'is_terminal': tfds.features.Scalar(
+                         dtype=np.bool_,
+                         doc='True on last step of the episode if it is a terminal step, True for demos.'
+                     ),
+                     'language_instruction': tfds.features.Text(
+                         doc='Language Instruction.'
+                     ),
+                     'language_instruction_nouns': tfds.features.Text(
+                         doc='Language Instruction Nouns.'
+                     ),
+                 }),
+                 'episode_metadata': tfds.features.FeaturesDict({
+                     'file_path': tfds.features.Text(
+                         doc='Path to the original data file.'
+                     ),
+                 }),
+             }))
+
+     def _split_paths(self):
+         """Define filepaths for data splits."""
+         train_files = glob.glob("/cm/shared/nhatcm3/workspace/Dataset/hdf5_data/Real_Data/*.hdf5")
+         return {
+             "train": train_files,
+             # "val": glob.glob("../../libero_goal_no_noops/*.hdf5"),
+         }
rlds_builder/Real_Data/__init__.py ADDED
File without changes
rlds_builder/Real_Data/__pycache__/LIBERO_90_dataset_builder.cpython-310.pyc ADDED
Binary file (6.24 kB)
rlds_builder/Real_Data/__pycache__/LIBERO_90_dataset_builder.cpython-39.pyc ADDED
Binary file (6.5 kB)
rlds_builder/Real_Data/__pycache__/LIBERO_Mem_dataset_builder.cpython-39.pyc ADDED
Binary file (6.66 kB)
rlds_builder/Real_Data/__pycache__/LIBERO_Relation_dataset_builder.cpython-310.pyc ADDED
Binary file (6.39 kB)
rlds_builder/Real_Data/__pycache__/LIBERO_Relation_dataset_builder.cpython-39.pyc ADDED
Binary file (6.37 kB)
rlds_builder/Real_Data/__pycache__/LIBERO_Spatial_ol_dataset_builder.cpython-39.pyc ADDED
Binary file (6.71 kB)
rlds_builder/Real_Data/__pycache__/Real_Data_dataset_builder.cpython-310.pyc ADDED
Binary file (6.73 kB)
rlds_builder/Real_Data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (164 Bytes)
rlds_builder/Real_Data/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (161 Bytes)
rlds_builder/Real_Data/__pycache__/conversion_utils.cpython-310.pyc ADDED
Binary file (7.89 kB)
rlds_builder/Real_Data/__pycache__/conversion_utils.cpython-39.pyc ADDED
Binary file (7.85 kB)
rlds_builder/Real_Data/conversion_utils.py ADDED
@@ -0,0 +1,226 @@
+ from typing import Tuple, Any, Dict, Union, Callable, Iterable
+ import numpy as np
+ import tensorflow as tf
+ import tensorflow_datasets as tfds
+
+ import itertools
+ from multiprocessing import Pool
+ from functools import partial
+ from tensorflow_datasets.core import download
+ from tensorflow_datasets.core import split_builder as split_builder_lib
+ from tensorflow_datasets.core import naming
+ from tensorflow_datasets.core import splits as splits_lib
+ from tensorflow_datasets.core import utils
+ from tensorflow_datasets.core import writer as writer_lib
+ from tensorflow_datasets.core import example_serializer
+ from tensorflow_datasets.core import dataset_builder
+ from tensorflow_datasets.core import file_adapters
+
+ Key = Union[str, int]
+ # The nested example dict passed to `features.encode_example`
+ Example = Dict[str, Any]
+ KeyExample = Tuple[Key, Example]
+
+
+ class MultiThreadedDatasetBuilder(tfds.core.GeneratorBasedBuilder):
+     """DatasetBuilder for example dataset."""
+     N_WORKERS = 10             # number of parallel workers for data conversion
+     MAX_PATHS_IN_MEMORY = 100  # number of paths converted & stored in memory before writing to disk
+                                # -> the higher, the faster / more parallel the conversion; adjust based on available RAM
+                                #    note that one path may yield multiple episodes -- adjust accordingly
+     PARSE_FCN = None           # needs to be filled with path-to-record-episode parse function
+
+     def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+         """Define data splits."""
+         split_paths = self._split_paths()
+         return {split: type(self).PARSE_FCN(paths=split_paths[split], split=split) for split in split_paths}
+
+     def _generate_examples(self):
+         pass  # this is implemented in a global method to enable multiprocessing
+
+     def _download_and_prepare(  # pytype: disable=signature-mismatch  # overriding-parameter-type-checks
+         self,
+         dl_manager: download.DownloadManager,
+         download_config: download.DownloadConfig,
+     ) -> None:
+         """Generate all splits and return the computed split infos."""
+         assert self.PARSE_FCN is not None  # need to overwrite parse function
+         split_builder = ParallelSplitBuilder(
+             split_dict=self.info.splits,
+             features=self.info.features,
+             dataset_size=self.info.dataset_size,
+             max_examples_per_split=download_config.max_examples_per_split,
+             beam_options=download_config.beam_options,
+             beam_runner=download_config.beam_runner,
+             file_format=self.info.file_format,
+             shard_config=download_config.get_shard_config(),
+             split_paths=self._split_paths(),
+             parse_function=type(self).PARSE_FCN,
+             n_workers=self.N_WORKERS,
+             max_paths_in_memory=self.MAX_PATHS_IN_MEMORY,
+         )
+         split_generators = self._split_generators(dl_manager)
+         split_generators = split_builder.normalize_legacy_split_generators(
+             split_generators=split_generators,
+             generator_fn=self._generate_examples,
+             is_beam=False,
+         )
+         dataset_builder._check_split_names(split_generators.keys())
+
+         # Start generating data for all splits
+         path_suffix = file_adapters.ADAPTER_FOR_FORMAT[
+             self.info.file_format
+         ].FILE_SUFFIX
+
+         split_info_futures = []
+         for split_name, generator in utils.tqdm(
+             split_generators.items(),
+             desc="Generating splits...",
+             unit=" splits",
+             leave=False,
+         ):
+             filename_template = naming.ShardedFileTemplate(
+                 split=split_name,
+                 dataset_name=self.name,
+                 data_dir=self.data_path,
+                 filetype_suffix=path_suffix,
+             )
+             future = split_builder.submit_split_generation(
+                 split_name=split_name,
+                 generator=generator,
+                 filename_template=filename_template,
+                 disable_shuffling=self.info.disable_shuffling,
+             )
+             split_info_futures.append(future)
+
+         # Finalize the splits (after Apache Beam completed, if it was used)
+         split_infos = [future.result() for future in split_info_futures]
+
+         # Update the info object with the splits.
+         split_dict = splits_lib.SplitDict(split_infos)
+         self.info.set_splits(split_dict)
+
+
+ class _SplitInfoFuture:
+     """Future containing the `tfds.core.SplitInfo` result."""
+
+     def __init__(self, callback: Callable[[], splits_lib.SplitInfo]):
+         self._callback = callback
+
+     def result(self) -> splits_lib.SplitInfo:
+         return self._callback()
+
+
+ def parse_examples_from_generator(paths, fcn, split_name, total_num_examples, features, serializer):
+     generator = fcn(paths, split_name)
+     outputs = []
+     for sample in utils.tqdm(
+         generator,
+         desc=f'Generating {split_name} examples...',
+         unit=' examples',
+         total=total_num_examples,
+         leave=False,
+         mininterval=1.0,
+     ):
+         if sample is None:
+             continue
+         key, example = sample
+         try:
+             example = features.encode_example(example)
+         except Exception as e:  # pylint: disable=broad-except
+             utils.reraise(e, prefix=f'Failed to encode example:\n{example}\n')
+         outputs.append((key, serializer.serialize_example(example)))
+     return outputs
+
+
+ class ParallelSplitBuilder(split_builder_lib.SplitBuilder):
+     def __init__(self, *args, split_paths, parse_function, n_workers, max_paths_in_memory, **kwargs):
+         super().__init__(*args, **kwargs)
+         self._split_paths = split_paths
+         self._parse_function = parse_function
+         self._n_workers = n_workers
+         self._max_paths_in_memory = max_paths_in_memory
+
+     def _build_from_generator(
+         self,
+         split_name: str,
+         generator: Iterable[KeyExample],
+         filename_template: naming.ShardedFileTemplate,
+         disable_shuffling: bool,
+     ) -> _SplitInfoFuture:
+         """Split generator for example generators.
+
+         Args:
+             split_name: Name of the split to generate.
+             generator: Iterable of `(key, example)` pairs.
+             filename_template: Template to format the filename for a shard.
+             disable_shuffling: Specifies whether to shuffle the examples.
+
+         Returns:
+             future: The future containing the `tfds.core.SplitInfo`.
+         """
+         total_num_examples = None
+         serialized_info = self._features.get_serialized_info()
+         writer = writer_lib.Writer(
+             serializer=example_serializer.ExampleSerializer(serialized_info),
+             filename_template=filename_template,
+             hash_salt=split_name,
+             disable_shuffling=disable_shuffling,
+             file_format=self._file_format,
+             shard_config=self._shard_config,
+         )
+
+         del generator  # use parallel generators instead
+         paths = self._split_paths[split_name]
+         path_lists = chunk_max(paths, self._n_workers, self._max_paths_in_memory)  # generate N file lists
+         print(f"Generating with {self._n_workers} workers!")
+         pool = Pool(processes=self._n_workers)
+         for i, paths in enumerate(path_lists):
+             print(f"Processing chunk {i + 1} of {len(path_lists)}.")
+             results = pool.map(
+                 partial(
+                     parse_examples_from_generator,
+                     fcn=self._parse_function,
+                     split_name=split_name,
+                     total_num_examples=total_num_examples,
+                     serializer=writer._serializer,
+                     features=self._features
+                 ),
+                 paths
+             )
+             # write results to shuffler --> this will automatically offload to disk if necessary
+             print("Writing conversion results...")
+             for result in itertools.chain(*results):
+                 key, serialized_example = result
+                 writer._shuffler.add(key, serialized_example)
+                 writer._num_examples += 1
+         pool.close()
+
+         print("Finishing split conversion...")
+         shard_lengths, total_size = writer.finalize()
+
+         split_info = splits_lib.SplitInfo(
+             name=split_name,
+             shard_lengths=shard_lengths,
+             num_bytes=total_size,
+             filename_template=filename_template,
+         )
+         return _SplitInfoFuture(lambda: split_info)
+
+
+ def dictlist2listdict(DL):
+     """Converts a dict of lists to a list of dicts."""
+     return [dict(zip(DL, t)) for t in zip(*DL.values())]
+
+
+ def chunks(l, n):
+     """Yield n sequential chunks from l."""
+     d, r = divmod(len(l), n)
+     for i in range(n):
+         si = (d + 1) * (i if i < r else r) + d * (0 if i < r else i - r)
+         yield l[si:si + (d + 1 if i < r else d)]
+
+
+ def chunk_max(l, n, max_chunk_sum):
+     """Split l into groups of at most max_chunk_sum paths, each divided among n workers."""
+     out = []
+     for _ in range(int(np.ceil(len(l) / max_chunk_sum))):
+         out.append(list(chunks(l[:max_chunk_sum], n)))
+         l = l[max_chunk_sum:]
+     return out
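To make the chunking concrete: `chunk_max` first cuts the path list into groups of at most `max_chunk_sum` paths (the in-memory budget per round), and `chunks` then divides each group among the `n` workers. A small illustration with hypothetical paths:

    from Real_Data.conversion_utils import chunk_max

    paths = [f"ep_{i}.hdf5" for i in range(7)]
    print(chunk_max(paths, 2, 4))
    # [[['ep_0.hdf5', 'ep_1.hdf5'], ['ep_2.hdf5', 'ep_3.hdf5']],
    #  [['ep_4.hdf5', 'ep_5.hdf5'], ['ep_6.hdf5']]]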
rlds_builder/Real_Data/example.png ADDED

Git LFS Details

  • SHA256: a42ed804cc473aed9abeb1786fba3ee51a66a36253ec2dc6b02cc645f6200a3f
  • Pointer size: 130 Bytes
  • Size of remote file: 82.1 kB