SamuelM0422 committed
Commit 7ca6d23 · verified · 1 Parent(s): c4ad535

Delete loading script

Files changed (1)
  1. SunDataset.py +0 -153
SunDataset.py DELETED
@@ -1,153 +0,0 @@
- import collections
- import json
- import os
-
- import datasets
-
-
- _HOMEPAGE = "https://universe.roboflow.com/samuelm0422/sundetection-bwqjs/dataset/1"
- _LICENSE = "CC BY 4.0"
- _CITATION = """\
- @misc{
-     sundetection-bwqjs_dataset,
-     title = { SunDetection Dataset },
-     type = { Open Source Dataset },
-     author = { SamuelM0422 },
-     howpublished = { \\url{ https://universe.roboflow.com/samuelm0422/sundetection-bwqjs } },
-     url = { https://universe.roboflow.com/samuelm0422/sundetection-bwqjs },
-     journal = { Roboflow Universe },
-     publisher = { Roboflow },
-     year = { 2025 },
-     month = { apr },
-     note = { visited on 2025-04-10 },
- }
- """
- _CATEGORIES = ['sun']
- _ANNOTATION_FILENAME = "_annotations.coco.json"
-
-
- class SUNDATASETConfig(datasets.BuilderConfig):
-     """Builder Config for SunDataset"""
-
-     def __init__(self, data_urls, **kwargs):
-         """
-         BuilderConfig for SunDataset.
-
-         Args:
-             data_urls: `dict`, name to url to download the zip file from.
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(SUNDATASETConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-         self.data_urls = data_urls
-
-
- class SUNDATASET(datasets.GeneratorBasedBuilder):
-     """SunDataset object detection dataset"""
-
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIGS = [
-         SUNDATASETConfig(
-             name="full",
-             description="Full version of SunDataset dataset.",
-             data_urls={
-                 "train": "https://huggingface.co/datasets/SamuelM0422/SunDataset/resolve/main/data/train.zip",
-                 "validation": "https://huggingface.co/datasets/SamuelM0422/SunDataset/resolve/main/data/valid.zip",
-                 "test": "https://huggingface.co/datasets/SamuelM0422/SunDataset/resolve/main/data/test.zip",
-             },
-         ),
-         SUNDATASETConfig(
-             name="mini",
-             description="Mini version of SunDataset dataset.",
-             data_urls={
-                 "train": "https://huggingface.co/datasets/SamuelM0422/SunDataset/resolve/main/data/valid-mini.zip",
-                 "validation": "https://huggingface.co/datasets/SamuelM0422/SunDataset/resolve/main/data/valid-mini.zip",
-                 "test": "https://huggingface.co/datasets/SamuelM0422/SunDataset/resolve/main/data/valid-mini.zip",
-             },
-         )
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "image_id": datasets.Value("int64"),
-                 "image": datasets.Image(),
-                 "width": datasets.Value("int32"),
-                 "height": datasets.Value("int32"),
-                 "objects": datasets.Sequence(
-                     {
-                         "id": datasets.Value("int64"),
-                         "area": datasets.Value("int64"),
-                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-                         "category": datasets.ClassLabel(names=_CATEGORIES),
-                     }
-                 ),
-             }
-         )
-         return datasets.DatasetInfo(
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_files = dl_manager.download_and_extract(self.config.data_urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "folder_dir": data_files["train"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "folder_dir": data_files["validation"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "folder_dir": data_files["test"],
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, folder_dir):
-         def process_annot(annot, category_id_to_category):
-             return {
-                 "id": annot["id"],
-                 "area": annot["area"],
-                 "bbox": annot["bbox"],
-                 "category": category_id_to_category[annot["category_id"]],
-             }
-
-         image_id_to_image = {}
-         idx = 0
-
-         annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
-         with open(annotation_filepath, "r") as f:
-             annotations = json.load(f)
-         category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
-         image_id_to_annotations = collections.defaultdict(list)
-         for annot in annotations["annotations"]:
-             image_id_to_annotations[annot["image_id"]].append(annot)
-         filename_to_image = {image["file_name"]: image for image in annotations["images"]}
-
-         for filename in os.listdir(folder_dir):
-             filepath = os.path.join(folder_dir, filename)
-             if filename in filename_to_image:
-                 image = filename_to_image[filename]
-                 objects = [
-                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
-                 ]
-                 with open(filepath, "rb") as f:
-                     image_bytes = f.read()
-                 yield idx, {
-                     "image_id": image["id"],
-                     "image": {"path": filepath, "bytes": image_bytes},
-                     "width": image["width"],
-                     "height": image["height"],
-                     "objects": objects,
-                 }
-                 idx += 1
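
Before this commit, loading the dataset meant executing the script above. A minimal usage sketch, assuming a `datasets` release that still supports script-backed repos (the repo id and the "full"/"mini" config names come from the deleted script; recent `datasets` versions additionally require opting in with `trust_remote_code`):

    from datasets import load_dataset

    # Runs SunDataset.py from the repo: downloads the split zips listed in
    # data_urls and parses the COCO annotations in _annotations.coco.json.
    ds = load_dataset("SamuelM0422/SunDataset", "full", trust_remote_code=True)

    example = ds["train"][0]
    print(example["width"], example["height"])
    print(example["objects"]["bbox"])  # COCO-style [x, y, width, height] boxes

After this deletion, loading presumably falls back to the Hub's automatic handling of the files under data/, with no repository code executed.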