Commit 93de92f
1 Parent(s): 48104ec
Update app.py

app.py CHANGED
@@ -466,52 +466,6 @@ def concatenate_images_vertical(image1, image2):
 
     return new_image
 
-def relate_anything(input_image, k):
-    logger.info(f'relate_anything_1_{input_image.size}_')
-    w, h = input_image.size
-    max_edge = 1500
-    if w > max_edge or h > max_edge:
-        ratio = max(w, h) / max_edge
-        new_size = (int(w / ratio), int(h / ratio))
-        input_image.thumbnail(new_size)
-
-    logger.info(f'relate_anything_2_')
-    # load image
-    pil_image = input_image.convert('RGBA')
-    image = np.array(input_image)
-    sam_masks = sam_mask_generator.generate(image)
-    filtered_masks = sort_and_deduplicate(sam_masks)
-
-    logger.info(f'relate_anything_3_')
-    feat_list = []
-    for fm in filtered_masks:
-        feat = torch.Tensor(fm['feat']).unsqueeze(0).unsqueeze(0).to(device)
-        feat_list.append(feat)
-    feat = torch.cat(feat_list, dim=1).to(device)
-    matrix_output, rel_triplets = ram_model.predict(feat)
-
-    logger.info(f'relate_anything_4_')
-    pil_image_list = []
-    for i, rel in enumerate(rel_triplets[:k]):
-        s,o,r = int(rel[0]),int(rel[1]),int(rel[2])
-        relation = relation_classes[r]
-
-        mask_image = Image.new('RGBA', pil_image.size, color=(0, 0, 0, 0))
-        mask_draw = ImageDraw.Draw(mask_image)
-
-        draw_selected_mask(filtered_masks[s]['segmentation'], mask_draw)
-        draw_object_mask(filtered_masks[o]['segmentation'], mask_draw)
-
-        current_pil_image = pil_image.copy()
-        current_pil_image.alpha_composite(mask_image)
-
-        title_image = create_title_image('Red', relation, 'Blue', current_pil_image.size[0])
-        concate_pil_image = concatenate_images_vertical(current_pil_image, title_image)
-        pil_image_list.append(concate_pil_image)
-
-    logger.info(f'relate_anything_5_{len(pil_image_list)}')
-    return pil_image_list
-
 mask_source_draw = "draw a mask on input image"
 mask_source_segment = "type what to detect below"
 
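For context, the relate_anything() helper removed in the hunk above downscaled oversized inputs to a 1500-pixel maximum edge before running SAM mask generation. A standalone sketch of just that resizing step with Pillow follows; the helper name is ours, not the app's:

    from PIL import Image

    def downscale_to_max_edge(img: Image.Image, max_edge: int = 1500) -> Image.Image:
        # mirrors the removed preprocessing: shrink only if either edge exceeds max_edge;
        # thumbnail() resizes in place and preserves the aspect ratio
        w, h = img.size
        if w > max_edge or h > max_edge:
            ratio = max(w, h) / max_edge
            img.thumbnail((int(w / ratio), int(h / ratio)))
        return img

    # usage: small = downscale_to_max_edge(Image.open('photo.jpg'))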
@@ -528,7 +482,6 @@ def run_anything_task(input_image, text_prompt, box_threshold, text_threshold,
         return [], gr.Gallery.update(label='Please upload a image!ππππ')
 
     file_temp = int(time.time())
-    logger.info(f'run_anything_task_[{file_temp}]_{task_type}/{inpaint_mode}/[{mask_source_radio}]/{remove_mode}/{remove_mask_extend}_[{text_prompt}]/[{inpaint_prompt}]___1_')
 
     output_images = []
 
@@ -662,13 +615,6 @@ if __name__ == "__main__":
     run_button.click(fn=run_anything_task, inputs=[
                     input_image, text_prompt, box_threshold, text_threshold, iou_threshold], outputs=[image_gallery, image_gallery], show_progress=True, queue=True)
 
-    DESCRIPTION = f'### This demo from [Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything). <br>'
-    DESCRIPTION += f'RAM from [RelateAnything](https://github.com/Luodian/RelateAnything). <br>'
-    DESCRIPTION += f'Remove(cleaner) from [lama-cleaner](https://github.com/Sanster/lama-cleaner). <br>'
-    DESCRIPTION += f'Thanks for their excellent work.'
-    DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. \
-        <a href="https://huggingface.co/spaces/yizhangliu/Grounded-Segment-Anything?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
-    gr.Markdown(DESCRIPTION)
 
     computer_info()
     block.launch(server_name='0.0.0.0', debug=args.debug, share=args.share)
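The wiring kept in the last hunk follows the usual Gradio Blocks pattern: a Button.click event maps the task function onto the input components and renders its return value into a Gallery. A minimal self-contained sketch, assuming Gradio 3.x (the API generation whose click() accepts the show_progress and queue keywords seen above); the component names and the pass-through task here are placeholders, not the app's real pipeline:

    import gradio as gr

    def run_task(image, prompt):
        # placeholder: the real app runs detection/segmentation here
        return [image]

    with gr.Blocks() as block:
        input_image = gr.Image(type='pil', label='input image')
        text_prompt = gr.Textbox(label='text prompt')
        run_button = gr.Button('Run')
        image_gallery = gr.Gallery(label='results')
        run_button.click(fn=run_task,
                         inputs=[input_image, text_prompt],
                         outputs=[image_gallery],
                         show_progress=True, queue=True)

    block.launch(server_name='0.0.0.0')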