---
dataset_info:
  features:
    - name: images
      list: image
    - name: problem
      dtype: string
    - name: answer
      list: string
    - name: image_id
      dtype: string
  splits:
    - name: train
      num_bytes: 4811206356
      num_examples: 10000
    - name: val
      num_bytes: 730780590.5
      num_examples: 1500
    - name: test
      num_bytes: 721301664.5
      num_examples: 1500
  download_size: 6261244811
  dataset_size: 6263288611
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: val
        path: data/val-*
      - split: test
        path: data/test-*
license: mit
task_categories:
  - text-generation
language:
  - en
---

This dataset was converted from MSCOCO 2014 to adapt COCO to the EasyR1 format, using the script below.

NOTE:

1. This dataset uses only COCO's segmentation (instance) and caption annotations, taken from the original train and val sets.
2. The first N_val images of the original val set (sorted by filename) become the new val set.
3. The last N_test images of the original val set become the new test set.
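
Each example pairs a single image with a fixed captioning prompt and every COCO caption available for that image. A schematic record (the caption strings here are illustrative placeholders, not real COCO data):

```python
from PIL import Image

# Illustrative record with the same schema the conversion script emits.
record = {
    "images": [Image.new("RGB", (640, 480))],  # one RGB image per example
    "problem": "<image>Provide a brief description of the given image.",
    "answer": [  # every COCO caption available for this image
        "A man riding a wave on top of a surfboard.",
        "A surfer balancing on a breaking wave.",
    ],
    "image_id": "9",
}
```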
```python
import os
import json
from datasets import Dataset, DatasetDict, Sequence
from datasets import Image as ImageData
from PIL import Image

N_train = 10000   # Limit to first 10000 images
N_val = 1500   # Limit to first 1500 images
N_test = 1500  # Limit to first 1500 images
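# Together these caps give the published split sizes: 10000 train / 1500 val / 1500 test.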
MSCOCO_PATH = "/share/liyilin-nfs/datasets/MSCOCO"

def generate_train_data(data_path: str, instances: dict, captions: dict):
    count = 0
    for fname in os.listdir(data_path):
        if count == N_train:
            break
        if fname.lower().endswith(('.jpg', '.jpeg', '.png')):
            count += 1  # count only image files toward the N_train cap
            # Recover the numeric id from the zero-padded COCO filename,
            # e.g. COCO_train2014_000000123456.jpg -> "123456".
            image_id = os.path.splitext(fname)[0][-10:].lstrip("0")
            image = Image.open(os.path.join(data_path, fname)).convert("RGB")

            img_instances = instances.get(image_id, [])  # category ids; not included in the output
            img_captions = captions.get(image_id, [])

            yield {
                "images": [image],
                "problem": "<image>Provide a brief description of the given image.",
                "answer": img_captions,
                "image_id": image_id
            }

def generate_val_data(data_path: str, instances: dict, captions: dict):
    # New val set: the first N_val images (sorted by filename) of the original val set.
    fnames = sorted([f for f in os.listdir(data_path) if f.lower().endswith(('.jpg', '.jpeg', '.png'))])
    for fname in fnames[:N_val]:
        image_id = os.path.splitext(fname)[0][-10:].lstrip("0")
        image = Image.open(os.path.join(data_path, fname)).convert("RGB")
        img_instances = instances.get(image_id, [])
        img_captions = captions.get(image_id, [])
        yield {
            "images": [image],
            "problem": "<image>Provide a brief description of the given image.",
            "answer": img_captions,
            "image_id": image_id
        }

def generate_test_data(data_path: str, instances: dict, captions: dict):
    # New test set: the last N_test images (sorted by filename) of the original val set.
    fnames = sorted([f for f in os.listdir(data_path) if f.lower().endswith(('.jpg', '.jpeg', '.png'))])
    for fname in fnames[-N_test:]:
        image_id = os.path.splitext(fname)[0][-10:].lstrip("0")
        image = Image.open(os.path.join(data_path, fname)).convert("RGB")
        img_instances = instances.get(image_id, [])
        img_captions = captions.get(image_id, [])
        yield {
            "images": [image],
            "problem": "<image>Provide a brief description of the given image.",
            "answer": img_captions,
            "image_id": image_id
        }

def load_instances_and_captions(instances_json_path, captions_json_path):
    # Build image_id -> [category_id, ...] and image_id -> [caption, ...] lookup maps.
    with open(instances_json_path, 'r') as f:
        instances_data = json.load(f)
    with open(captions_json_path, 'r') as f:
        captions_data = json.load(f)

    instances_map = {}
    for ann in instances_data['annotations']:
        instances_map.setdefault(str(ann['image_id']), []).append(ann['category_id'])

    captions_map = {}
    for ann in captions_data['annotations']:
        captions_map.setdefault(str(ann['image_id']), []).append(ann['caption'])

    return instances_map, captions_map

def main():
    train_path = f"{MSCOCO_PATH}/train2014"
    val_path = f"{MSCOCO_PATH}/val2014"

    train_instances_json = f"{MSCOCO_PATH}/annotations/instances_train2014.json"
    val_instances_json = f"{MSCOCO_PATH}/annotations/instances_val2014.json"
    train_captions_json = f"{MSCOCO_PATH}/annotations/captions_train2014.json"
    val_captions_json = f"{MSCOCO_PATH}/annotations/captions_val2014.json"

    train_instances_map, train_captions_map = load_instances_and_captions(train_instances_json, train_captions_json)
    val_instances_map, val_captions_map = load_instances_and_captions(val_instances_json, val_captions_json)

    trainset = Dataset.from_generator(
        generate_train_data, 
        gen_kwargs={"data_path": train_path, "instances": train_instances_map, "captions": train_captions_map}
    )
    valset = Dataset.from_generator(
        generate_val_data, 
        gen_kwargs={"data_path": val_path, "instances": val_instances_map, "captions": val_captions_map}
    )
    testset = Dataset.from_generator(
        generate_test_data, 
        gen_kwargs={"data_path": val_path, "instances": val_instances_map, "captions": val_captions_map}
    )

    # Cast "images" to a sequence of Image features so the PIL objects serialize as image data.
    dataset = DatasetDict({"train": trainset, "val": valset, "test": testset}).cast_column("images", Sequence(ImageData()))
    dataset.push_to_hub("JustinLeeCEO/MSCOCO2014")
    print("Successfully pushed the dataset to HuggingFace!")

if __name__ == "__main__":
    main()
```
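
Once pushed, the dataset can be loaded directly from the Hub. A minimal sketch, with split and field names as in the card above:

```python
from datasets import load_dataset

ds = load_dataset("JustinLeeCEO/MSCOCO2014")
example = ds["train"][0]

print(example["problem"])    # the fixed captioning prompt
print(example["answer"])     # all reference captions for this image
print(example["image_id"])   # COCO image id as a string
image = example["images"][0] # decoded PIL image
```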