import cv2
import numpy as np
import torch
import torchvision
from PIL import Image

from osdsynth.processor.wrappers.grounding_dino import get_grounding_dino_model
from osdsynth.processor.wrappers.ram import get_tagging_model, run_tagging_model
from osdsynth.processor.wrappers.sam import (
    convert_detections_to_dict,
    convert_detections_to_list,
    crop_detections_with_xyxy,
    filter_detections,
    get_sam_predictor,
    get_sam_segmentation_from_xyxy,
    mask_subtract_contained,
    post_process_mask,
    sort_detections_by_area,
)
from osdsynth.utils.logger import SkipImageException
from osdsynth.visualizer.som import draw_som_on_image


class SegmentImage:
    """Segment an image: tag classes, detect boxes, and extract per-object masks."""

    def __init__(self, cfg, logger, device, init_gdino=True, init_tagging=True, init_sam=True):
        self.cfg = cfg
        self.logger = logger
        self.device = device

        # GroundingDINO: open-vocabulary box detector prompted with class names.
        if init_gdino:
            self.grounding_dino_model = get_grounding_dino_model(cfg, device)
        else:
            self.grounding_dino_model = None

        # RAM tagging model: proposes class names when none are provided.
        if init_tagging:
            self.tagging_transform, self.tagging_model = get_tagging_model(cfg, device)
        else:
            self.tagging_transform = self.tagging_model = None

        # SAM: converts detected boxes into segmentation masks.
        if init_sam:
            self.sam_predictor = get_sam_predictor(cfg.sam_variant, device)
        else:
            self.sam_predictor = None

    def process(self, image_bgr, two_class=None, plot_som=True):
        """Segment the image and return (SoM visualization, list of detections)."""
        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
        image_rgb_pil = Image.fromarray(image_rgb)

        if two_class is None:
            # No classes given: let the tagging model propose them.
            # RAM expects a 384x384 input.
            img_tagging = image_rgb_pil.resize((384, 384))
            img_tagging = self.tagging_transform(img_tagging).unsqueeze(0).to(self.device)
            classes = run_tagging_model(self.cfg, img_tagging, self.tagging_model)
        else:
            classes = two_class

        if len(classes) == 0:
            raise SkipImageException("No foreground objects detected by tagging model.")

        # Prompt GroundingDINO with the class list to get candidate boxes.
        detections = self.grounding_dino_model.predict_with_classes(
            image=image_bgr,
            classes=classes,
            box_threshold=self.cfg.box_threshold,
            text_threshold=self.cfg.text_threshold,
        )

        if len(detections.class_id) < 1:
            raise SkipImageException("No object detected.")

        # Class-agnostic NMS to drop duplicate boxes.
        nms_idx = (
            torchvision.ops.nms(
                torch.from_numpy(detections.xyxy),
                torch.from_numpy(detections.confidence),
                self.cfg.nms_threshold,
            )
            .numpy()
            .tolist()
        )

        print(f"Before NMS: {len(detections.xyxy)} detections")
        detections.xyxy = detections.xyxy[nms_idx]
        detections.confidence = detections.confidence[nms_idx]
        detections.class_id = detections.class_id[nms_idx]
        print(f"After NMS: {len(detections.xyxy)} detections")

        # Drop detections whose phrase matched none of the prompted classes.
        valid_idx = detections.class_id != -1
        detections.xyxy = detections.xyxy[valid_idx]
        detections.confidence = detections.confidence[valid_idx]
        detections.class_id = detections.class_id[valid_idx]

        # Prompt SAM with the surviving boxes to get per-instance masks.
        detections.mask = get_sam_segmentation_from_xyxy(
            sam_predictor=self.sam_predictor, image=image_rgb, xyxy=detections.xyxy
        )

        detections_dict = convert_detections_to_dict(detections, classes)
        detections_dict = filter_detections(self.cfg, detections_dict, image_rgb)

        if len(detections_dict["xyxy"]) < 1:
            raise SkipImageException("No object detected after filtering.")

        # Subtract masks that are (mostly) contained inside other masks.
        detections_dict["subtracted_mask"], mask_contained = mask_subtract_contained(
            detections_dict["xyxy"], detections_dict["mask"], th1=0.05, th2=0.05
        )

        detections_dict = sort_detections_by_area(detections_dict)
        detections_dict = post_process_mask(detections_dict)
        detections_list = convert_detections_to_list(detections_dict, classes)

        # Crop each detection, cut it out of its background, and make class
        # names unique by appending an index (e.g. "chair" -> "chair0").
        detections_list = crop_detections_with_xyxy(self.cfg, image_rgb_pil, detections_list)
        detections_list = segmentImage(detections_list, image_rgb_pil)
        detections_list = add_index_to_class(detections_list)

        if two_class is not None:
            # Every requested class must come back as exactly one detection.
            if len(detections_list) != len(two_class):
                raise SkipImageException("Not all objects detected.")
            # add_index_to_class appended a single-digit index; strip it
            # before comparing against the requested class names.
            detected_classes = [det['class_name'][:-1] for det in detections_list]
            if any(cls not in detected_classes for cls in two_class):
                raise SkipImageException("Not all objects detected.")

        if plot_som:
            # Set-of-Marks visualization: overlay masks, marks, and boxes.
            vis_som = draw_som_on_image(
                detections_dict,
                image_rgb,
                label_mode="1",
                alpha=0.4,
                anno_mode=["Mask", "Mark", "Box"],
            )
        else:
            vis_som = None

        return vis_som, detections_list


def segmentImage(detections_list, image_rgb_pil):
    """Composite each detection's image crop onto a transparent background using its mask crop."""
    for i in range(len(detections_list)):
        image_pil = detections_list[i]['image_crop']
        mask_pil = Image.fromarray(detections_list[i]['mask_crop'])

        image_rgba = image_pil.convert("RGBA")

        # Fully transparent canvas; pixels outside the mask stay transparent.
        transparent_bg = Image.new("RGBA", image_rgba.size, (0, 0, 0, 0))
        segmented_image = Image.composite(image_rgba, transparent_bg, mask_pil)

        detections_list[i]['image_segment'] = segmented_image

    return detections_list
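
# A tiny standalone illustration of the Image.composite call above, using a
# hypothetical 2x2 red image and a binary 0/255 mask (toy values, not taken
# from the pipeline):
#
#   >>> import numpy as np
#   >>> from PIL import Image
#   >>> img = Image.new("RGBA", (2, 2), (255, 0, 0, 255))
#   >>> mask = Image.fromarray(np.array([[255, 0], [0, 255]], dtype=np.uint8))
#   >>> out = Image.composite(img, Image.new("RGBA", (2, 2), (0, 0, 0, 0)), mask)
#   >>> out.getpixel((0, 0))  # inside the mask: opaque red
#   (255, 0, 0, 255)
#   >>> out.getpixel((1, 0))  # outside the mask: fully transparent
#   (0, 0, 0, 0)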


def skipbyconfidence(detections_list):
    """Drop detections whose confidence is below 0.3."""
    skip_index = []
    for i in range(len(detections_list)):
        if detections_list[i]['confidence'] < 0.3:
            skip_index.append(i)

    # Delete from the back so earlier indices stay valid.
    for i in skip_index[::-1]:
        del detections_list[i]

    return detections_list


def add_bbox_and_taggingtext_to_image(image, detections_list):
    """Draw each detection's box and "<class> <confidence>" label on the image (in place)."""
    for i in range(len(detections_list)):
        bbox = detections_list[i]['xyxy']
        label = detections_list[i]['class_name']
        confidence = detections_list[i]['confidence']

        cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 2)
        # The label sits at the vertical midpoint of the box's left edge.
        cv2.putText(
            image,
            f"{label} {confidence:.2f}",
            (int(bbox[0]), int((bbox[1] + bbox[3]) / 2)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 255, 0),
            2,
        )

    return image


def add_index_to_class(detections_list):
    """Make class names unique by appending a per-class occurrence index ("chair" -> "chair0", "chair1", ...)."""
    class_index = {}
    for detection in detections_list:
        class_name = detection['class_name']
        if class_name not in class_index:
            class_index[class_name] = 0
        else:
            class_index[class_name] += 1

        detection['class_name'] = f"{class_name}{class_index[class_name]}"
    return detections_list
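

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. It assumes `cfg`
    # is a config object exposing sam_variant, box_threshold, text_threshold,
    # and nms_threshold (the fields SegmentImage reads above), and that
    # "example.jpg" exists; both are placeholders for your own setup.
    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("segment_demo")

    # cfg = ...  # load your project config here (placeholder)
    # device = "cuda" if torch.cuda.is_available() else "cpu"
    # segmenter = SegmentImage(cfg, logger, device)
    # image_bgr = cv2.imread("example.jpg")
    # vis_som, detections = segmenter.process(image_bgr, two_class=["chair", "table"])
    # detections = skipbyconfidence(detections)
    # annotated = add_bbox_and_taggingtext_to_image(image_bgr.copy(), detections)
    # cv2.imwrite("annotated.jpg", annotated)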