File size: 8,915 Bytes
7d9e4fc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 |
import cv2
import torch
import torchvision
from osdsynth.processor.wrappers.grounding_dino import get_grounding_dino_model
from osdsynth.processor.wrappers.ram import get_tagging_model, run_tagging_model
from osdsynth.processor.wrappers.sam import (
convert_detections_to_dict,
convert_detections_to_list,
crop_detections_with_xyxy,
filter_detections,
get_sam_predictor,
get_sam_segmentation_from_xyxy,
mask_subtract_contained,
post_process_mask,
sort_detections_by_area,
)
from osdsynth.utils.logger import SkipImageException
from osdsynth.visualizer.som import draw_som_on_image
from PIL import Image
import numpy as np
class SegmentImage:
    """Detect, tag, and segment objects in an image.

    Pipeline: (optional) RAM tagging to propose class names -> GroundingDINO
    box detection -> NMS -> SAM instance segmentation -> filtering and
    post-processing -> optional Set-of-Mark (SoM) visualization.
    """

    def __init__(self, cfg, logger, device, init_gdino=True, init_tagging=True, init_sam=True):
        """Initialize only the requested sub-models.

        Args:
            cfg: Config with thresholds (box/text/nms) and the SAM variant.
            logger: Logger instance.
            device: Torch device all models are placed on.
            init_gdino: Load the GroundingDINO detector.
            init_tagging: Load the RAM tagging model. Required only when
                process() is called with two_class=None.
            init_sam: Load the SAM predictor.
        """
        self.cfg = cfg
        self.logger = logger
        self.device = device

        # GroundingDINO: open-vocabulary box detector.
        self.grounding_dino_model = get_grounding_dino_model(cfg, device) if init_gdino else None

        # RAM tagging model: proposes foreground class names from the image.
        if init_tagging:
            self.tagging_transform, self.tagging_model = get_tagging_model(cfg, device)
        else:
            self.tagging_transform = self.tagging_model = None

        # SAM: converts detected boxes into instance masks.
        self.sam_predictor = get_sam_predictor(cfg.sam_variant, device) if init_sam else None

    def process(self, image_bgr, two_class, plot_som=True):
        """Segment the image.

        Args:
            image_bgr: Input image as an HxWx3 BGR array (OpenCV convention).
            two_class: Optional list of class names to detect. When None, the
                tagging model proposes the classes instead.
            plot_som: When True, also render a Set-of-Mark visualization.

        Returns:
            Tuple (vis_som, detections_list): the SoM visualization image (or
            None when plot_som is False) and a list of per-object dicts.

        Raises:
            SkipImageException: when no objects are found, or when a requested
                class list is not fully matched by the detections.
        """
        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
        image_rgb_pil = Image.fromarray(image_rgb)

        if two_class is None:
            # Only run the tagging preprocessing when the caller did not supply
            # the classes. This also avoids an AttributeError when the instance
            # was built with init_tagging=False, since the transform/model are
            # None in that case and previously ran unconditionally.
            img_tagging = image_rgb_pil.resize((384, 384))
            img_tagging = self.tagging_transform(img_tagging).unsqueeze(0).to(self.device)
            classes = run_tagging_model(self.cfg, img_tagging, self.tagging_model)
        else:
            classes = two_class

        if len(classes) == 0:
            raise SkipImageException("No foreground objects detected by tagging model.")

        # GroundingDINO detects boxes for the given class prompts.
        detections = self.grounding_dino_model.predict_with_classes(
            image=image_bgr,  # This function expects a BGR image...
            classes=classes,
            box_threshold=self.cfg.box_threshold,
            text_threshold=self.cfg.text_threshold,
        )

        if len(detections.class_id) < 1:
            raise SkipImageException("No object detected.")

        # Non-maximum suppression to drop overlapping duplicate boxes.
        nms_idx = (
            torchvision.ops.nms(
                torch.from_numpy(detections.xyxy),
                torch.from_numpy(detections.confidence),
                self.cfg.nms_threshold,
            )
            .numpy()
            .tolist()
        )
        print(f"Before NMS: {len(detections.xyxy)} detections")
        detections.xyxy = detections.xyxy[nms_idx]
        detections.confidence = detections.confidence[nms_idx]
        detections.class_id = detections.class_id[nms_idx]
        print(f"After NMS: {len(detections.xyxy)} detections")

        # Somehow some detections will have class_id=-1, remove them
        valid_idx = detections.class_id != -1
        detections.xyxy = detections.xyxy[valid_idx]
        detections.confidence = detections.confidence[valid_idx]
        detections.class_id = detections.class_id[valid_idx]

        # Segment Anything: one mask per surviving box.
        detections.mask = get_sam_segmentation_from_xyxy(
            sam_predictor=self.sam_predictor, image=image_rgb, xyxy=detections.xyxy
        )

        # Convert the detection to a dict. Elements are np.ndarray
        detections_dict = convert_detections_to_dict(detections, classes)

        # Filter out the objects based on various criteria
        detections_dict = filter_detections(self.cfg, detections_dict, image_rgb)
        if len(detections_dict["xyxy"]) < 1:
            raise SkipImageException("No object detected after filtering.")

        # Subtract the mask of bounding boxes that are contained by it
        detections_dict["subtracted_mask"], _ = mask_subtract_contained(
            detections_dict["xyxy"], detections_dict["mask"], th1=0.05, th2=0.05
        )

        # Sort the dets by area, add RLE encodings, then flatten to a list of
        # per-object dicts.
        detections_dict = sort_detections_by_area(detections_dict)
        detections_dict = post_process_mask(detections_dict)
        detections_list = convert_detections_to_list(detections_dict, classes)

        # Skip objects with confidence lower than 0.4
        # detections_list = skipbyconfidence(detections_list)

        detections_list = crop_detections_with_xyxy(self.cfg, image_rgb_pil, detections_list)
        detections_list = segmentImage(detections_list, image_rgb_pil)
        detections_list = add_index_to_class(detections_list)

        if two_class is not None:
            # Every requested class must yield exactly one detection.
            # (Generalizes the previous hard-coded checks for lengths 1/2/3.)
            if len(detections_list) != len(two_class):
                raise SkipImageException("Not all objects detected.")
            if len(two_class) >= 2:
                # add_index_to_class appended a one-character occurrence index;
                # strip it to recover the base class name.
                # NOTE(review): [:-1] silently breaks if an index ever reaches
                # 10 — confirm per-class counts stay single-digit.
                detected_names = [det["class_name"][:-1] for det in detections_list]
                if any(name not in detected_names for name in two_class):
                    raise SkipImageException("Not all objects detected.")

        if plot_som:
            # Visualize with SoM
            vis_som = draw_som_on_image(
                detections_dict,
                image_rgb,
                label_mode="1",
                alpha=0.4,
                anno_mode=["Mask", "Mark", "Box"],
            )
        else:
            vis_som = None

        return vis_som, detections_list
# Copy the object area from the original image to a transparent background
def segmentImage(detections_list, image_rgb_pil):
    """Paste each detection's masked crop onto a fully transparent canvas.

    Stores the composited RGBA image under the 'image_segment' key of every
    detection dict and returns the (mutated) list.

    Note: image_rgb_pil is unused but kept for interface compatibility.
    """
    for det in detections_list:
        crop_rgba = det["image_crop"].convert("RGBA")
        mask = Image.fromarray(det["mask_crop"])
        canvas = Image.new("RGBA", crop_rgba.size, (0, 0, 0, 0))
        # Where the mask is on, take pixels from the crop; elsewhere the
        # result stays transparent.
        det["image_segment"] = Image.composite(crop_rgba, canvas, mask)
    return detections_list
def skipbyconfidence(detections_list, threshold=0.3):
    """Drop detections whose confidence is below *threshold*, in place.

    Args:
        detections_list: List of detection dicts with a 'confidence' key.
        threshold: Minimum confidence to keep (default 0.3, matching the
            previous hard-coded value; now a parameter so callers can tune it).

    Returns:
        The same list object, mutated to contain only detections with
        confidence >= threshold.
    """
    # Slice assignment preserves in-place mutation semantics for callers that
    # hold a reference to the original list.
    detections_list[:] = [det for det in detections_list if det["confidence"] >= threshold]
    return detections_list
def add_bbox_and_taggingtext_to_image(image, detections_list):
    """Draw each detection's bounding box and "<label> <confidence>" caption.

    Mutates *image* (an OpenCV array) in place with green boxes and text,
    then returns it.
    """
    green = (0, 255, 0)
    for det in detections_list:
        x1, y1, x2, y2 = det["xyxy"]
        caption = f"{det['class_name']} {det['confidence']:.2f}"
        cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), green, 2)
        # Caption is anchored at the box's left edge, vertically centered.
        cv2.putText(
            image,
            caption,
            (int(x1), int((y1 + y2) / 2)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            green,
            2,
        )
    return image
def add_index_to_class(detections_list):
    """Suffix each class_name with its per-class occurrence index (0-based).

    The first "cat" becomes "cat0", the second "cat1", and so on; counting is
    independent per class name. Mutates and returns the list.
    """
    seen = {}
    for det in detections_list:
        base = det["class_name"]
        occurrence = seen.get(base, -1) + 1
        seen[base] = occurrence
        det["class_name"] = f"{base}{occurrence}"
    return detections_list
|