jbilcke-hf committed
Commit 9d633d3
1 Parent(s): 3895075

Update app.py

Files changed (1)
  1. app.py +628 -58
app.py CHANGED
@@ -81,10 +81,76 @@ sd_pipe = None
81
  lama_cleaner_model= None
82
  ram_model = None
83
84
  def load_image(image_path):
85
- if isinstance(image_path, np.ndarray):
86
- image_pil = Image.fromarray(image_path.astype(np.uint8))
87
- elif isinstance(image_path, PIL.Image.Image):
88
  image_pil = image_path
89
  else:
90
  image_pil = Image.open(image_path).convert("RGB") # load image
@@ -99,19 +165,16 @@ def load_image(image_path):
99
  image, _ = transform(image_pil, None) # 3, h, w
100
  return image_pil, image
101
 
102
-
103
- def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
104
- args = SLConfig.fromfile(model_config_path)
105
- model = build_model(args)
106
  args.device = device
107
-
108
- cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
109
- checkpoint = torch.load(cache_file, map_location=device)
110
- log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
111
- print("Model loaded from {} \n => {}".format(cache_file, log))
112
  _ = model.eval()
113
- return model
114
-
115
  def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True, device="cpu"):
116
  caption = caption.lower()
117
  caption = caption.strip()
@@ -147,28 +210,503 @@ def get_grounding_output(model, image, caption, box_threshold, text_threshold, w
147
 
148
  return boxes_filt, pred_phrases
149
150
 
151
- def run_inference(input_image, text_prompt, box_threshold, text_threshold, config_file, ckpt_repo_id, ckpt_filenmae):
152
-
153
- # Load the Grounding DINO model
154
- model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae)
155
-
156
- # Load the input image
157
- image_pil, image = load_image(input_image)
158
-
159
- # Run the object detection and grounding model
160
- boxes, labels = get_grounding_output(model, image, text_prompt, box_threshold, text_threshold)
161
-
162
- # Convert the boxes and labels to a JSON format
163
- result = []
164
- for box, label in zip(boxes, labels):
165
- result.append({
166
- "box": box.tolist(),
167
- "label": label
168
- })
169
 
170
- return result
171
172
 
173
  if __name__ == "__main__":
174
  parser = argparse.ArgumentParser("Grounded SAM demo", add_help=True)
@@ -177,28 +715,60 @@ if __name__ == "__main__":
177
  args = parser.parse_args()
178
  print(f'args = {args}')
179
 
180
- model_config_file = 'GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py'
181
- model_ckpt_repo_id = "ShilongLiu/GroundingDINO"
182
- model_ckpt_filenmae = "groundingdino_swint_ogc.pth"
183
-
184
- def inference_func(input_image, text_prompt):
185
- result = run_inference(input_image, text_prompt, 0.3, 0.25, model_config_file, model_ckpt_repo_id, model_ckpt_filenmae)
186
- return result
187
-
188
- # Create the Gradio interface for the model
189
- interface = gr.Interface(
190
- fn=inference_func,
191
- inputs=[
192
- gr.inputs.Image(label="Input Image"),
193
- gr.inputs.Textbox(label="Detection Prompt")
194
- ],
195
- outputs=gr.outputs.Dataframe(type="pandas"),
196
- title="Object Detection and Grounding",
197
- description="A Gradio app to detect objects in an image and ground them to captions using Grounding DINO.",
198
- server_name='0.0.0.0',
199
- debug=args.debug,
200
- share=args.share
201
- )
202
-
203
- # Launch the interface
204
- interface.launch()
81
  lama_cleaner_model= None
82
  ram_model = None
83
 
84
+ def get_sam_vit_h_4b8939():
85
+ if not os.path.exists('./sam_vit_h_4b8939.pth'):
86
+ logger.info(f"get sam_vit_h_4b8939.pth...")
87
+ result = subprocess.run(['wget', 'https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth'], check=True)
88
+ print(f'wget sam_vit_h_4b8939.pth result = {result}')
89
+
90
+ def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
91
+ args = SLConfig.fromfile(model_config_path)
92
+ model = build_model(args)
93
+ args.device = device
94
+
95
+ cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
96
+ checkpoint = torch.load(cache_file, map_location=device)
97
+ log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
98
+ print("Model loaded from {} \n => {}".format(cache_file, log))
99
+ _ = model.eval()
100
+ return model
101
+
102
+ def plot_boxes_to_image(image_pil, tgt):
103
+ H, W = tgt["size"]
104
+ boxes = tgt["boxes"]
105
+ labels = tgt["labels"]
106
+ assert len(boxes) == len(labels), "boxes and labels must have same length"
107
+
108
+ draw = ImageDraw.Draw(image_pil)
109
+ mask = Image.new("L", image_pil.size, 0)
110
+ mask_draw = ImageDraw.Draw(mask)
111
+
112
+ # draw boxes and masks
113
+ for box, label in zip(boxes, labels):
114
+ # from 0..1 to 0..W, 0..H
115
+ box = box * torch.Tensor([W, H, W, H])
116
+ # from xywh to xyxy
117
+ box[:2] -= box[2:] / 2
118
+ box[2:] += box[:2]
119
+ # random color
120
+ color = tuple(np.random.randint(0, 255, size=3).tolist())
121
+ # draw
122
+ x0, y0, x1, y1 = box
123
+ x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
124
+
125
+ draw.rectangle([x0, y0, x1, y1], outline=color, width=6)
126
+ # draw.text((x0, y0), str(label), fill=color)
127
+
128
+ font = ImageFont.load_default()
129
+ if hasattr(font, "getbbox"):
130
+ bbox = draw.textbbox((x0, y0), str(label), font)
131
+ else:
132
+ w, h = draw.textsize(str(label), font)
133
+ bbox = (x0, y0, w + x0, y0 + h)
134
+ # bbox = draw.textbbox((x0, y0), str(label))
135
+ draw.rectangle(bbox, fill=color)
136
+
137
+ try:
138
+ font = os.path.join(cv2.__path__[0],'qt','fonts','DejaVuSans.ttf')
139
+ font_size = 36
140
+ new_font = ImageFont.truetype(font, font_size)
141
+
142
+ draw.text((x0+2, y0+2), str(label), font=new_font, fill="white")
143
+ except Exception as e:
144
+ pass
145
+
146
+ mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=6)
147
+
148
+
149
+ return image_pil, mask
150
+
151
  def load_image(image_path):
152
+ # # load image
153
+ if isinstance(image_path, PIL.Image.Image):
 
154
  image_pil = image_path
155
  else:
156
  image_pil = Image.open(image_path).convert("RGB") # load image
 
165
  image, _ = transform(image_pil, None) # 3, h, w
166
  return image_pil, image
167
 
168
+ def load_model(model_config_path, model_checkpoint_path, device):
169
+ args = SLConfig.fromfile(model_config_path)
 
 
170
  args.device = device
171
+ model = build_model(args)
172
+ checkpoint = torch.load(model_checkpoint_path, map_location=device) #"cpu")
173
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
174
+ print(load_res)
 
175
  _ = model.eval()
176
+ return model
177
+
178
  def get_grounding_output(model, image, caption, box_threshold, text_threshold, with_logits=True, device="cpu"):
179
  caption = caption.lower()
180
  caption = caption.strip()
 
210
 
211
  return boxes_filt, pred_phrases
212
 
213
+ def show_mask(mask, ax, random_color=False):
214
+ if random_color:
215
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
216
+ else:
217
+ color = np.array([30/255, 144/255, 255/255, 0.6])
218
+ h, w = mask.shape[-2:]
219
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
220
+ ax.imshow(mask_image)
221
+
222
+ def show_box(box, ax, label):
223
+ x0, y0 = box[0], box[1]
224
+ w, h = box[2] - box[0], box[3] - box[1]
225
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
226
+ ax.text(x0, y0, label)
227
+
228
+ def xywh_to_xyxy(box, sizeW, sizeH):
229
+ if isinstance(box, list):
230
+ box = torch.Tensor(box)
231
+ box = box * torch.Tensor([sizeW, sizeH, sizeW, sizeH])
232
+ box[:2] -= box[2:] / 2
233
+ box[2:] += box[:2]
234
+ box = box.numpy()
235
+ return box
236
+
237
+ def mask_extend(img, box, extend_pixels=10, useRectangle=True):
238
+ box[0] = int(box[0])
239
+ box[1] = int(box[1])
240
+ box[2] = int(box[2])
241
+ box[3] = int(box[3])
242
+ region = img.crop(tuple(box))
243
+ new_width = box[2] - box[0] + 2*extend_pixels
244
+ new_height = box[3] - box[1] + 2*extend_pixels
245
+
246
+ region_BILINEAR = region.resize((int(new_width), int(new_height)))
247
+ if useRectangle:
248
+ region_draw = ImageDraw.Draw(region_BILINEAR)
249
+ region_draw.rectangle((0, 0, new_width, new_height), fill=(255, 255, 255))
250
+ img.paste(region_BILINEAR, (int(box[0]-extend_pixels), int(box[1]-extend_pixels)))
251
+ return img
252
+
253
+ def mix_masks(imgs):
254
+ re_img = 1 - np.asarray(imgs[0].convert("1"))
255
+ for i in range(len(imgs)-1):
256
+ re_img = np.multiply(re_img, 1 - np.asarray(imgs[i+1].convert("1")))
257
+ re_img = 1 - re_img
258
+ return Image.fromarray(np.uint8(255*re_img))
259
+
260
+ def set_device():
261
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
262
+ print(f'device={device}')
263
+
264
+ def load_groundingdino_model():
265
+ # initialize groundingdino model
266
+ global groundingdino_model
267
+ logger.info(f"initialize groundingdino model...")
268
+ groundingdino_model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae)
269
+
270
+ def load_sam_model():
271
+ # initialize SAM
272
+ global sam_model, sam_predictor, sam_mask_generator, sam_device
273
+ logger.info(f"initialize SAM model...")
274
+ sam_device = device
275
+ sam_model = build_sam(checkpoint=sam_checkpoint).to(sam_device)
276
+ sam_predictor = SamPredictor(sam_model)
277
+ sam_mask_generator = SamAutomaticMaskGenerator(sam_model)
278
+
279
+ def load_sd_model():
280
+ # initialize stable-diffusion-inpainting
281
+ global sd_pipe
282
+ logger.info(f"initialize stable-diffusion-inpainting...")
283
+ sd_pipe = None
284
+ if os.environ.get('IS_MY_DEBUG') is None:
285
+ sd_pipe = StableDiffusionInpaintPipeline.from_pretrained(
286
+ "runwayml/stable-diffusion-inpainting",
287
+ # revision="fp16",
288
+ # "stabilityai/stable-diffusion-2-inpainting",
289
+ torch_dtype=torch.float16,
290
+ )
291
+ sd_pipe = sd_pipe.to(device)
292
+
293
+ def load_lama_cleaner_model():
294
+ # initialize lama_cleaner
295
+ global lama_cleaner_model
296
+ logger.info(f"initialize lama_cleaner...")
297
+
298
+ lama_cleaner_model = ModelManager(
299
+ name='lama',
300
+ device='cpu', # device,
301
+ )
302
+
303
+ def lama_cleaner_process(image, mask, cleaner_size_limit=1080):
304
+ ori_image = image
305
+ if mask.shape[0] == image.shape[1] and mask.shape[1] == image.shape[0] and mask.shape[0] != mask.shape[1]:
306
+ # rotate image
307
+ ori_image = np.transpose(image[::-1, ...][:, ::-1], axes=(1, 0, 2))[::-1, ...]
308
+ image = ori_image
309
+
310
+ original_shape = ori_image.shape
311
+ interpolation = cv2.INTER_CUBIC
312
+
313
+ size_limit = cleaner_size_limit
314
+ if size_limit == -1:
315
+ size_limit = max(image.shape)
316
+ else:
317
+ size_limit = int(size_limit)
318
+
319
+ config = lama_Config(
320
+ ldm_steps=25,
321
+ ldm_sampler='plms',
322
+ zits_wireframe=True,
323
+ hd_strategy='Original',
324
+ hd_strategy_crop_margin=196,
325
+ hd_strategy_crop_trigger_size=1280,
326
+ hd_strategy_resize_limit=2048,
327
+ prompt='',
328
+ use_croper=False,
329
+ croper_x=0,
330
+ croper_y=0,
331
+ croper_height=512,
332
+ croper_width=512,
333
+ sd_mask_blur=5,
334
+ sd_strength=0.75,
335
+ sd_steps=50,
336
+ sd_guidance_scale=7.5,
337
+ sd_sampler='ddim',
338
+ sd_seed=42,
339
+ cv2_flag='INPAINT_NS',
340
+ cv2_radius=5,
341
+ )
342
+
343
+ if config.sd_seed == -1:
344
+ config.sd_seed = random.randint(1, 999999999)
345
 
346
+ # logger.info(f"Origin image shape_0_: {original_shape} / {size_limit}")
347
+ image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
348
+ # logger.info(f"Resized image shape_1_: {image.shape}")
349
+
350
+ # logger.info(f"mask image shape_0_: {mask.shape} / {type(mask)}")
351
+ mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
352
+ # logger.info(f"mask image shape_1_: {mask.shape} / {type(mask)}")
353
+
354
+ res_np_img = lama_cleaner_model(image, mask, config)
355
+ torch.cuda.empty_cache()
356
+
357
+ image = Image.open(io.BytesIO(numpy_to_bytes(res_np_img, 'png')))
358
+ return image
359
+
360
+ class Ram_Predictor(RamPredictor):
361
+ def __init__(self, config, device='cpu'):
362
+ self.config = config
363
+ self.device = torch.device(device)
364
+ self._build_model()
365
+
366
+ def _build_model(self):
367
+ self.model = RamModel(**self.config.model).to(self.device)
368
+ if self.config.load_from is not None:
369
+ self.model.load_state_dict(torch.load(self.config.load_from, map_location=self.device))
370
+ self.model.train()
371
+
372
+ def load_ram_model():
373
+ # load ram model
374
+ global ram_model
375
+ model_path = "./checkpoints/ram_epoch12.pth"
376
+ ram_config = dict(
377
+ model=dict(
378
+ pretrained_model_name_or_path='bert-base-uncased',
379
+ load_pretrained_weights=False,
380
+ num_transformer_layer=2,
381
+ input_feature_size=256,
382
+ output_feature_size=768,
383
+ cls_feature_size=512,
384
+ num_relation_classes=56,
385
+ pred_type='attention',
386
+ loss_type='multi_label_ce',
387
+ ),
388
+ load_from=model_path,
389
+ )
390
+ ram_config = mmengine_Config(ram_config)
391
+ ram_model = Ram_Predictor(ram_config, device)
392
+
393
+ # visualization
394
+ def draw_selected_mask(mask, draw):
395
+ color = (255, 0, 0, 153)
396
+ nonzero_coords = np.transpose(np.nonzero(mask))
397
+ for coord in nonzero_coords:
398
+ draw.point(coord[::-1], fill=color)
399
+
400
+ def draw_object_mask(mask, draw):
401
+ color = (0, 0, 255, 153)
402
+ nonzero_coords = np.transpose(np.nonzero(mask))
403
+ for coord in nonzero_coords:
404
+ draw.point(coord[::-1], fill=color)
405
+
406
+ def create_title_image(word1, word2, word3, width, font_path='./assets/OpenSans-Bold.ttf'):
407
+ # Define the colors to use for each word
408
+ color_red = (255, 0, 0)
409
+ color_black = (0, 0, 0)
410
+ color_blue = (0, 0, 255)
411
+
412
+ # Define the initial font size and spacing between words
413
+ font_size = 40
414
+
415
+ # Create a new image with the specified width and white background
416
+ image = Image.new('RGB', (width, 60), (255, 255, 255))
417
+
418
+ try:
419
+ # Load the specified font
420
+ font = ImageFont.truetype(font_path, font_size)
421
+
422
+ # Keep increasing the font size until all words fit within the desired width
423
+ while True:
424
+ # Create a draw object for the image
425
+ draw = ImageDraw.Draw(image)
426
+
427
+ word_spacing = font_size / 2
428
+ # Draw each word in the appropriate color
429
+ x_offset = word_spacing
430
+ draw.text((x_offset, 0), word1, color_red, font=font)
431
+ x_offset += font.getsize(word1)[0] + word_spacing
432
+ draw.text((x_offset, 0), word2, color_black, font=font)
433
+ x_offset += font.getsize(word2)[0] + word_spacing
434
+ draw.text((x_offset, 0), word3, color_blue, font=font)
435
+
436
+ word_sizes = [font.getsize(word) for word in [word1, word2, word3]]
437
+ total_width = sum([size[0] for size in word_sizes]) + word_spacing * 3
438
+
439
+ # Stop increasing font size if the image is within the desired width
440
+ if total_width <= width:
441
+ break
442
+
443
+ # Increase font size and reset the draw object
444
+ font_size -= 1
445
+ image = Image.new('RGB', (width, 50), (255, 255, 255))
446
+ font = ImageFont.truetype(font_path, font_size)
447
+ draw = None
448
+ except Exception as e:
449
+ pass
450
+
451
+ return image
452
+
453
+ def concatenate_images_vertical(image1, image2):
454
+ # Get the dimensions of the two images
455
+ width1, height1 = image1.size
456
+ width2, height2 = image2.size
457
+
458
+ # Create a new image with the combined height and the maximum width
459
+ new_image = Image.new('RGBA', (max(width1, width2), height1 + height2))
460
+
461
+ # Paste the first image at the top of the new image
462
+ new_image.paste(image1, (0, 0))
463
+
464
+ # Paste the second image below the first image
465
+ new_image.paste(image2, (0, height1))
466
+
467
+ return new_image
468
+
469
+ def relate_anything(input_image, k):
470
+ logger.info(f'relate_anything_1_{input_image.size}_')
471
+ w, h = input_image.size
472
+ max_edge = 1500
473
+ if w > max_edge or h > max_edge:
474
+ ratio = max(w, h) / max_edge
475
+ new_size = (int(w / ratio), int(h / ratio))
476
+ input_image.thumbnail(new_size)
477
+
478
+ logger.info(f'relate_anything_2_')
479
+ # load image
480
+ pil_image = input_image.convert('RGBA')
481
+ image = np.array(input_image)
482
+ sam_masks = sam_mask_generator.generate(image)
483
+ filtered_masks = sort_and_deduplicate(sam_masks)
484
+
485
+ logger.info(f'relate_anything_3_')
486
+ feat_list = []
487
+ for fm in filtered_masks:
488
+ feat = torch.Tensor(fm['feat']).unsqueeze(0).unsqueeze(0).to(device)
489
+ feat_list.append(feat)
490
+ feat = torch.cat(feat_list, dim=1).to(device)
491
+ matrix_output, rel_triplets = ram_model.predict(feat)
492
+
493
+ logger.info(f'relate_anything_4_')
494
+ pil_image_list = []
495
+ for i, rel in enumerate(rel_triplets[:k]):
496
+ s,o,r = int(rel[0]),int(rel[1]),int(rel[2])
497
+ relation = relation_classes[r]
498
+
499
+ mask_image = Image.new('RGBA', pil_image.size, color=(0, 0, 0, 0))
500
+ mask_draw = ImageDraw.Draw(mask_image)
501
+
502
+ draw_selected_mask(filtered_masks[s]['segmentation'], mask_draw)
503
+ draw_object_mask(filtered_masks[o]['segmentation'], mask_draw)
504
+
505
+ current_pil_image = pil_image.copy()
506
+ current_pil_image.alpha_composite(mask_image)
507
+
508
+ title_image = create_title_image('Red', relation, 'Blue', current_pil_image.size[0])
509
+ concate_pil_image = concatenate_images_vertical(current_pil_image, title_image)
510
+ pil_image_list.append(concate_pil_image)
511
+
512
+ logger.info(f'relate_anything_5_{len(pil_image_list)}')
513
+ return pil_image_list
514
+
515
+ mask_source_draw = "draw a mask on input image"
516
+ mask_source_segment = "type what to detect below"
517
+
518
+ def run_anything_task(input_image, text_prompt, task_type, inpaint_prompt, box_threshold, text_threshold,
519
+ iou_threshold, inpaint_mode, mask_source_radio, remove_mode, remove_mask_extend, num_relation, cleaner_size_limit=1080):
520
+ if (task_type == 'relate anything'):
521
+ output_images = relate_anything(input_image['image'], num_relation)
522
+ return output_images, gr.Gallery.update(label='relate images')
523
+
524
+ text_prompt = text_prompt.strip()
525
+ if not ((task_type == 'inpainting' or task_type == 'remove') and mask_source_radio == mask_source_draw):
526
+ if text_prompt == '':
527
+ return [], gr.Gallery.update(label='Detection prompt is not found!😂😂😂😂')
528
+
529
+ if input_image is None:
530
+ return [], gr.Gallery.update(label='Please upload a image!😂😂😂😂')
531
+
532
+ file_temp = int(time.time())
533
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}/{inpaint_mode}/[{mask_source_radio}]/{remove_mode}/{remove_mask_extend}_[{text_prompt}]/[{inpaint_prompt}]___1_')
534
+
535
+ output_images = []
536
+
537
+ # load image
538
+ if mask_source_radio == mask_source_draw:
539
+ input_mask_pil = input_image['mask']
540
+ input_mask = np.array(input_mask_pil.convert("L"))
541
+
542
+ if isinstance(input_image, dict):
543
+ image_pil, image = load_image(input_image['image'].convert("RGB"))
544
+ input_img = input_image['image']
545
+ output_images.append(input_image['image'])
546
+ else:
547
+ image_pil, image = load_image(input_image.convert("RGB"))
548
+ input_img = input_image
549
+ output_images.append(input_image)
550
 
551
+ size = image_pil.size
552
 
553
+ pred_dict = {
554
+ }
555
+
556
+ # run grounding dino model
557
+ if (task_type == 'inpainting' or task_type == 'remove') and mask_source_radio == mask_source_draw:
558
+ pass
559
+ else:
560
+ groundingdino_device = 'cpu'
561
+ if device != 'cpu':
562
+ try:
563
+ from groundingdino import _C
564
+ groundingdino_device = 'cuda:0'
565
+ except:
566
+ warnings.warn("Failed to load custom C++ ops. Running on CPU mode Only in groundingdino!")
567
+
568
+ boxes_filt, pred_phrases = get_grounding_output(
569
+ groundingdino_model, image, text_prompt, box_threshold, text_threshold, device=groundingdino_device
570
+ )
571
+ if boxes_filt.size(0) == 0:
572
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_[{text_prompt}]_1_[No objects detected, please try others.]_')
573
+ return [], gr.Gallery.update(label='No objects detected, please try others.😂😂😂😂')
574
+ boxes_filt_ori = copy.deepcopy(boxes_filt)
575
+
576
+ pred_dict = {
577
+ "boxes": boxes_filt,
578
+ "size": [size[1], size[0]], # H,W
579
+ "labels": pred_phrases,
580
+ }
581
+
582
+ image_with_box = plot_boxes_to_image(copy.deepcopy(image_pil), pred_dict)[0]
583
+ output_images.append(image_with_box)
584
+
585
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_2_')
586
+ if task_type == 'segment' or ((task_type == 'inpainting' or task_type == 'remove') and mask_source_radio == mask_source_segment):
587
+ image = np.array(input_img)
588
+ sam_predictor.set_image(image)
589
+
590
+ H, W = size[1], size[0]
591
+ for i in range(boxes_filt.size(0)):
592
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
593
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
594
+ boxes_filt[i][2:] += boxes_filt[i][:2]
595
+
596
+ boxes_filt = boxes_filt.to(sam_device)
597
+ transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2])
598
+
599
+ masks, _, _, _ = sam_predictor.predict_torch(
600
+ point_coords = None,
601
+ point_labels = None,
602
+ boxes = transformed_boxes,
603
+ multimask_output = False,
604
+ )
605
+ # masks: [9, 1, 512, 512]
606
+ assert sam_checkpoint, 'sam_checkpoint is not found!'
607
+ # draw output image
608
+ plt.figure(figsize=(10, 10))
609
+ plt.imshow(image)
610
+ for mask in masks:
611
+ show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
612
+ for box, label in zip(boxes_filt, pred_phrases):
613
+ show_box(box.cpu().numpy(), plt.gca(), label)
614
+ plt.axis('off')
615
+ image_path = os.path.join(output_dir, f"grounding_seg_output_{file_temp}.jpg")
616
+ plt.savefig(image_path, bbox_inches="tight")
617
+ segment_image_result = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
618
+ os.remove(image_path)
619
+ output_images.append(segment_image_result)
620
+
621
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_3_')
622
+ if task_type == 'detection' or task_type == 'segment':
623
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_9_')
624
+ return pred_dict
625
+ elif task_type == 'inpainting' or task_type == 'remove':
626
+ if inpaint_prompt.strip() == '' and mask_source_radio == mask_source_segment:
627
+ task_type = 'remove'
628
+
629
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_4_')
630
+ if mask_source_radio == mask_source_draw:
631
+ mask_pil = input_mask_pil
632
+ mask = input_mask
633
+ else:
634
+ masks_ori = copy.deepcopy(masks)
635
+ if inpaint_mode == 'merge':
636
+ masks = torch.sum(masks, dim=0).unsqueeze(0)
637
+ masks = torch.where(masks > 0, True, False)
638
+ mask = masks[0][0].cpu().numpy()
639
+ mask_pil = Image.fromarray(mask)
640
+ output_images.append(mask_pil.convert("RGB"))
641
+
642
+ if task_type == 'inpainting':
643
+ # inpainting pipeline
644
+ image_source_for_inpaint = image_pil.resize((512, 512))
645
+ image_mask_for_inpaint = mask_pil.resize((512, 512))
646
+ image_inpainting = sd_pipe(prompt=inpaint_prompt, image=image_source_for_inpaint, mask_image=image_mask_for_inpaint).images[0]
647
+ else:
648
+ # remove from mask
649
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_5_')
650
+ if mask_source_radio == mask_source_segment:
651
+ mask_imgs = []
652
+ masks_shape = masks_ori.shape
653
+ boxes_filt_ori_array = boxes_filt_ori.numpy()
654
+ if inpaint_mode == 'merge':
655
+ extend_shape_0 = masks_shape[0]
656
+ extend_shape_1 = masks_shape[1]
657
+ else:
658
+ extend_shape_0 = 1
659
+ extend_shape_1 = 1
660
+ for i in range(extend_shape_0):
661
+ for j in range(extend_shape_1):
662
+ mask = masks_ori[i][j].cpu().numpy()
663
+ mask_pil = Image.fromarray(mask)
664
+
665
+ if remove_mode == 'segment':
666
+ useRectangle = False
667
+ else:
668
+ useRectangle = True
669
+
670
+ try:
671
+ remove_mask_extend = int(remove_mask_extend)
672
+ except:
673
+ remove_mask_extend = 10
674
+ mask_pil_exp = mask_extend(copy.deepcopy(mask_pil).convert("RGB"),
675
+ xywh_to_xyxy(torch.tensor(boxes_filt_ori_array[i]), size[0], size[1]),
676
+ extend_pixels=remove_mask_extend, useRectangle=useRectangle)
677
+ mask_imgs.append(mask_pil_exp)
678
+ mask_pil = mix_masks(mask_imgs)
679
+ output_images.append(mask_pil.convert("RGB"))
680
+
681
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_6_')
682
+ image_inpainting = lama_cleaner_process(np.array(image_pil), np.array(mask_pil.convert("L")), cleaner_size_limit)
683
+ # output_images.append(image_inpainting)
684
+
685
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_7_')
686
+ image_inpainting = image_inpainting.resize((image_pil.size[0], image_pil.size[1]))
687
+ output_images.append(image_inpainting)
688
+ logger.info(f'run_anything_task_[{file_temp}]_{task_type}_9_')
689
+ return output_images, gr.Gallery.update(label='result images')
690
+ else:
691
+ logger.info(f"task_type:{task_type} error!")
692
+ logger.info(f'run_anything_task_[{file_temp}]_9_9_')
693
+ return output_images, gr.Gallery.update(label='result images')
694
+
695
+ def change_radio_display(task_type, mask_source_radio):
696
+ text_prompt_visible = True
697
+ inpaint_prompt_visible = False
698
+ mask_source_radio_visible = False
699
+ num_relation_visible = False
700
+ if task_type == "inpainting":
701
+ inpaint_prompt_visible = True
702
+ if task_type == "inpainting" or task_type == "remove":
703
+ mask_source_radio_visible = True
704
+ if mask_source_radio == mask_source_draw:
705
+ text_prompt_visible = False
706
+ if task_type == "relate anything":
707
+ text_prompt_visible = False
708
+ num_relation_visible = True
709
+ return gr.Textbox.update(visible=text_prompt_visible), gr.Textbox.update(visible=inpaint_prompt_visible), gr.Radio.update(visible=mask_source_radio_visible), gr.Slider.update(visible=num_relation_visible)
710
 
711
  if __name__ == "__main__":
712
  parser = argparse.ArgumentParser("Grounded SAM demo", add_help=True)
 
715
  args = parser.parse_args()
716
  print(f'args = {args}')
717
 
718
+ set_device()
719
+ get_sam_vit_h_4b8939()
720
+ load_groundingdino_model()
721
+ load_sam_model()
722
+ load_sd_model()
723
+ load_lama_cleaner_model()
724
+ load_ram_model()
725
+
726
+ os.system("pip list")
727
+
728
+ block = gr.Blocks().queue()
729
+ with block:
730
+ with gr.Row():
731
+ with gr.Column():
732
+ input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload")
733
+ task_type = gr.Radio(["detection", "segment", "inpainting", "remove", "relate anything"], value="detection",
734
+ label='Task type', visible=True)
735
+ mask_source_radio = gr.Radio([mask_source_draw, mask_source_segment],
736
+ value=mask_source_segment, label="Mask from",
737
+ visible=False)
738
+ text_prompt = gr.Textbox(label="Detection Prompt [To detect multiple objects, separate each name with '.', like this: cat . dog . chair]", placeholder="Cannot be empty")
739
+ inpaint_prompt = gr.Textbox(label="Inpaint Prompt (if this is empty, then remove)", visible=False)
740
+ num_relation = gr.Slider(label="How many relations do you want to see", minimum=1, maximum=20, value=5, step=1, visible=False)
741
+ run_button = gr.Button(label="Run", visible=True)
742
+ with gr.Accordion("Advanced options", open=False) as advanced_options:
743
+ box_threshold = gr.Slider(
744
+ label="Box Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.001
745
+ )
746
+ text_threshold = gr.Slider(
747
+ label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
748
+ )
749
+ iou_threshold = gr.Slider(
750
+ label="IOU Threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.001
751
+ )
752
+ inpaint_mode = gr.Radio(["merge", "first"], value="merge", label="inpaint_mode")
753
+ with gr.Row():
754
+ with gr.Column(scale=1):
755
+ remove_mode = gr.Radio(["segment", "rectangle"], value="segment", label='remove mode')
756
+ with gr.Column(scale=1):
757
+ remove_mask_extend = gr.Textbox(label="remove_mask_extend", value='10')
758
+
759
+ run_button.click(fn=run_anything_task, inputs=[
760
+ input_image, text_prompt, task_type, inpaint_prompt, box_threshold, text_threshold, iou_threshold, inpaint_mode, mask_source_radio, remove_mode, remove_mask_extend, num_relation], outputs=gr.outputs.Dataframe(type="pandas"), show_progress=True, queue=True)
761
+
762
+ mask_source_radio.change(fn=change_radio_display, inputs=[task_type, mask_source_radio], outputs=[text_prompt, inpaint_prompt, mask_source_radio, num_relation])
763
+ task_type.change(fn=change_radio_display, inputs=[task_type, mask_source_radio], outputs=[text_prompt, inpaint_prompt, mask_source_radio, num_relation])
764
+
765
+ DESCRIPTION = f'### This demo from [Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything). <br>'
766
+ DESCRIPTION += f'RAM from [RelateAnything](https://github.com/Luodian/RelateAnything). <br>'
767
+ DESCRIPTION += f'Remove(cleaner) from [lama-cleaner](https://github.com/Sanster/lama-cleaner). <br>'
768
+ DESCRIPTION += f'Thanks for their excellent work.'
769
+ DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. \
770
+ <a href="https://huggingface.co/spaces/yizhangliu/Grounded-Segment-Anything?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
771
+ gr.Markdown(DESCRIPTION)
772
+
773
+ computer_info()
774
+ block.launch(server_name='0.0.0.0', debug=args.debug, share=args.share)
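Note on the box geometry used above: Grounding DINO returns boxes as normalized (cx, cy, w, h), and the added code rescales them to absolute (x0, y0, x1, y1) pixel coordinates in three places (plot_boxes_to_image, xywh_to_xyxy, and the SAM box preparation inside run_anything_task). A minimal standalone sketch of that conversion, illustrative only and not part of this commit (the helper name is hypothetical):

    import torch

    def cxcywh_norm_to_xyxy(box, width, height):
        # normalized (cx, cy, w, h) -> absolute (x0, y0, x1, y1), mirroring the arithmetic above
        box = torch.as_tensor(box, dtype=torch.float32) * torch.tensor([width, height, width, height])
        box[:2] -= box[2:] / 2   # centre -> top-left corner
        box[2:] += box[:2]       # width/height -> bottom-right corner
        return box

    # a box centred in a 640x480 image, covering half of each dimension
    print(cxcywh_norm_to_xyxy([0.5, 0.5, 0.5, 0.5], 640, 480))  # tensor([160., 120., 480., 360.])

Similarly, mix_masks combines the per-object masks by taking the complement of the product of complements, which is the element-wise union (logical OR) of the binary masks. A tiny NumPy check of that identity, again illustrative only:

    import numpy as np

    a = np.array([[1, 0], [0, 0]], dtype=np.uint8)
    b = np.array([[0, 0], [0, 1]], dtype=np.uint8)

    union = 1 - (1 - a) * (1 - b)   # De Morgan: not(not a and not b) == (a or b)
    print(union)                    # [[1 0]
                                    #  [0 1]]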