import os

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Input CSVs and the directory holding the referenced images.
CAPTIONS_CSV = "captions.csv"
META_CSV = "case_metadata.csv"
GOLD_CSV = "bboxes_gold.csv"
RATERS_CSV = "bboxes_raters.csv"
IMAGES_DIR = "images"

# Output location.
OUT_DIR = "data"
OUT_PARQUET = os.path.join(OUT_DIR, "nova-v1.parquet")

os.makedirs(OUT_DIR, exist_ok=True)

# Load the source tables.
captions = pd.read_csv(CAPTIONS_CSV)
meta = pd.read_csv(META_CSV)
gold = pd.read_csv(GOLD_CSV)
raters = pd.read_csv(RATERS_CSV)

# Normalize the join keys to strings so the merges below do not mismatch dtypes.
for df in (captions, meta, gold, raters):
    if "case_id" in df.columns:
        df["case_id"] = df["case_id"].astype(str)
    if "scan_id" in df.columns:
        df["scan_id"] = df["scan_id"].astype(str)
    if "filename" in df.columns:
        df["filename"] = df["filename"].astype(str)

# One row per image, carrying its caption.
images = captions[["filename", "case_id", "scan_id", "caption"]].copy()
images["caption"] = images["caption"].fillna("").astype(str)
images.rename(columns={"caption": "caption_text"}, inplace=True)

# Forward slashes keep the stored paths portable across platforms.
images["image_path"] = IMAGES_DIR + "/" + images["filename"]
images["split"] = "test"

# Case-level metadata, joined onto every image of the case.
meta_cols = [
    "title", "publication_date", "clinical_history",
    "differential_diagnosis", "final_diagnosis", "link",
]
meta = meta[["case_id"] + meta_cols].copy()

df = images.merge(meta, on="case_id", how="left")

# Gold-standard boxes, tagged with source="gold".
gold_cols = ["filename", "x", "y", "width", "height"]
gold_use = gold[gold_cols].copy()
gold_use["source"] = "gold"
gold_use["label"] = ""

# Rater boxes: the rater id becomes the "source" field.
r_cols = ["filename", "rater", "x", "y", "width", "height"]
raters_use = raters[r_cols].copy()
raters_use.rename(columns={"rater": "source"}, inplace=True)
raters_use["label"] = ""

all_boxes = pd.concat([gold_use, raters_use], ignore_index=True)


def pack_boxes(group: pd.DataFrame):
    # Collapse all boxes for one image into a list of plain dicts that
    # mirrors the Arrow bbox struct defined below.
    return [
        {
            "x": float(row["x"]),
            "y": float(row["y"]),
            "width": float(row["width"]),
            "height": float(row["height"]),
            "source": str(row["source"]),
            "label": str(row["label"]),
        }
        for _, row in group.iterrows()
    ]


boxes_per_file = (
    all_boxes
    .groupby("filename", sort=False, group_keys=False)
    .apply(pack_boxes)
    .rename("bboxes")
    .reset_index()
)

# Attach the packed boxes; images with no annotations get an empty list.
df = df.merge(boxes_per_file, on="filename", how="left")
df["bboxes"] = df["bboxes"].apply(lambda v: v if isinstance(v, list) else [])

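# Optional sanity check (an addition, not in the original pipeline): the left
# merge above silently drops boxes whose filename has no caption row, so flag
# any orphaned annotations instead of losing them unnoticed.
orphan_files = sorted(set(all_boxes["filename"]) - set(df["filename"]))
if orphan_files:
    print(f"[WARN] {len(orphan_files)} annotated filenames have no caption row:", orphan_files[:5])
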
bbox_struct = pa.struct([
    ("x", pa.float64()),
    ("y", pa.float64()),
    ("width", pa.float64()),
    ("height", pa.float64()),
    ("source", pa.string()),
    ("label", pa.string()),
])

meta_struct = pa.struct([
    ("title", pa.string()),
    ("publication_date", pa.string()),
    ("clinical_history", pa.string()),
    ("differential_diagnosis", pa.string()),
    ("final_diagnosis", pa.string()),
    ("link", pa.string()),
])

schema = pa.schema([
    ("image_path", pa.string()),
    ("filename", pa.string()),
    ("split", pa.string()),
    ("case_id", pa.string()),
    ("scan_id", pa.string()),
    ("caption_text", pa.string()),
    ("bboxes", pa.list_(bbox_struct)),
    ("meta", meta_struct),
])

# Arrow string fields reject NaN, so blank out missing metadata first.
for col in meta_cols:
    if col in df.columns:
        df[col] = df[col].fillna("").astype(str)


def to_arrow_array_list_of_struct(list_of_lists, struct_type):
    # Convert a whole column (one Python list of box dicts per row) into an
    # Arrow list-of-struct array with a single pa.array call.
    return pa.array(list_of_lists, type=pa.list_(struct_type))


# Nested columns: one struct per row for metadata, one list of structs per row for boxes.
meta_dicts = df[meta_cols].to_dict(orient="records")
meta_array = pa.array(meta_dicts, type=meta_struct)

bboxes_array = to_arrow_array_list_of_struct(df["bboxes"].tolist(), bbox_struct)

table = pa.Table.from_arrays(
    [
        pa.array(df["image_path"].tolist(), type=pa.string()),
        pa.array(df["filename"].tolist(), type=pa.string()),
        pa.array(df["split"].tolist(), type=pa.string()),
        pa.array(df["case_id"].tolist(), type=pa.string()),
        pa.array(df["scan_id"].tolist(), type=pa.string()),
        pa.array(df["caption_text"].tolist(), type=pa.string()),
        bboxes_array,
        meta_array,
    ],
    schema=schema,
)

# Warn (but do not fail) if referenced images are missing on disk.
missing = [p for p in df["image_path"] if not os.path.exists(p)]
if missing:
    print(f"[WARN] {len(missing)} image paths do not exist locally. First few:", missing[:5])

pq.write_table(table, OUT_PARQUET)
print(f"Wrote {OUT_PARQUET} with {table.num_rows} rows.")

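# Optional read-back check (an addition, not part of the original script):
# reload the file and confirm the nested columns round-trip as expected.
check = pq.read_table(OUT_PARQUET)
print("Read back:", check.num_rows, "rows")
print(check.schema)
if check.num_rows:
    print("First row's boxes:", check.column("bboxes")[0].as_py())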