Spaces: Build error
Update app.py
app.py CHANGED
@@ -5,8 +5,7 @@ import requests, validators
 import torch
 import pathlib
 from PIL import Image
-from transformers import AutoFeatureExtractor, DetrForObjectDetection
-
+from transformers import AutoFeatureExtractor, DetrForObjectDetection
 import os
 
 # colors for visualization
@@ -52,23 +51,15 @@ def visualize_prediction(pil_img, output_dict, threshold=0.7, id2label=None):
     plt.axis("off")
     return fig2img(plt.gcf())
 
-def detect_objects(model_name,url_input,image_input,threshold):
+def detect_objects(model_name,image_input,threshold):
 
     #Extract model and feature extractor
     feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
 
-    if 'detr' in model_name:
-
-        model = DetrForObjectDetection.from_pretrained(model_name)
-
-    elif 'yolos' in model_name:
-
-        model = YolosForObjectDetection.from_pretrained(model_name)
-
-    if validators.url(url_input):
-        image = Image.open(requests.get(url_input, stream=True).raw)
+    if 'detr' in model_name:
+        model = DetrForObjectDetection.from_pretrained(model_name)
 
-
+    if image_input:
         image = image_input
 
     #Make prediction
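The "#Make prediction" step sits outside this hunk, so the diff never shows how the simplified detect_objects finishes. As a rough sketch only, assuming the hidden part encodes the image with the feature extractor, runs the model, rescales the boxes with the extractor's post_process helper, and hands off to the file's existing visualize_prediction (none of which is confirmed by this diff), the trimmed-down function could look like this:

def detect_objects(model_name, image_input, threshold):
    # Extract model and feature extractor; only the DETR branch survives this commit
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
    if 'detr' in model_name:
        model = DetrForObjectDetection.from_pretrained(model_name)

    if image_input:
        image = image_input

    # Assumed prediction step: encode, forward pass, rescale boxes to the original image size
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    target_sizes = torch.tensor([image.size[::-1]])
    processed = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0]

    # Assumed hand-off to the visualization helper defined earlier in app.py
    return visualize_prediction(image, processed, threshold, model.config.id2label)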
@@ -82,27 +73,18 @@ def detect_objects(model_name,url_input,image_input,threshold):
 def set_example_image(example: list) -> dict:
     return gr.Image.update(value=example[0])
 
-def set_example_url(example: list) -> dict:
-    return gr.Textbox.update(value=example[0])
 
 
-title = """<h1 id="title">
+title = """<h1 id="title">Detection for Drone</h1>"""
 
 description = """
 Links to HuggingFace Models:
 - [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50)
 - [facebook/detr-resnet-101](https://huggingface.co/facebook/detr-resnet-101)
-- [hustvl/yolos-small](https://huggingface.co/hustvl/yolos-small)
-- [hustvl/yolos-tiny](https://huggingface.co/hustvl/yolos-tiny)
-"""
-
-models = ["facebook/detr-resnet-50","facebook/detr-resnet-101",'hustvl/yolos-small','hustvl/yolos-tiny']
-urls = ["https://c8.alamy.com/comp/J2AB4K/the-new-york-stock-exchange-on-the-wall-street-in-new-york-J2AB4K.jpg"]
-
-twitter_link = """
-[](https://twitter.com/nickmuchi)
 """
 
+models = ["facebook/detr-resnet-50","facebook/detr-resnet-101"]
+#examples = ['1daaadc1e83fcecc7bfa920ed2773653.jpeg']
 css = '''
 h1#title {
 text-align: center;
@@ -113,21 +95,10 @@ demo = gr.Blocks(css=css)
 with demo:
     gr.Markdown(title)
     gr.Markdown(description)
-    gr.Markdown(twitter_link)
     options = gr.Dropdown(choices=models,label='Select Object Detection Model',show_label=True)
     slider_input = gr.Slider(minimum=0.2,maximum=1,value=0.7,label='Prediction Threshold')
 
-    with gr.Tabs():
-        with gr.TabItem('Image URL'):
-            with gr.Row():
-                url_input = gr.Textbox(lines=2,label='Enter valid image URL here..')
-                img_output_from_url = gr.Image(shape=(650,650))
-
-            with gr.Row():
-                example_url = gr.Dataset(components=[url_input],samples=[[str(url)] for url in urls])
-
-            url_but = gr.Button('Detect')
-
+    with gr.Tabs():
         with gr.TabItem('Image Upload'):
             with gr.Row():
                 img_input = gr.Image(type='pil')
@@ -136,18 +107,16 @@ with demo:
             with gr.Row():
                 example_images = gr.Dataset(components=[img_input],
                                             samples=[[path.as_posix()]
-                                                     for path in sorted(pathlib.Path('images').rglob('*.
+                                                     for path in sorted(pathlib.Path('images').rglob('*.jpeg'))])
 
             img_but = gr.Button('Detect')
 
 
-
-    img_but.click(detect_objects,inputs=[options,url_input,img_input,slider_input],outputs=img_output_from_upload,queue=True)
+    img_but.click(detect_objects,inputs=[options,img_input,slider_input],outputs=img_output_from_upload,queue=True)
     example_images.click(fn=set_example_image,inputs=[example_images],outputs=[img_input])
-    example_url.click(fn=set_example_url,inputs=[example_url],outputs=[url_input])
 
 
-    gr.Markdown("")
+    #gr.Markdown("")
 
 
-demo.launch(enable_queue=True)
+demo.launch(enable_queue=True)
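The unchanged context lines call a fig2img helper (return fig2img(plt.gcf())) whose definition never appears in this diff. A common way such a helper is written, offered purely as an assumption about code outside the changed hunks, is to round-trip the Matplotlib figure through an in-memory PNG buffer:

import io
import matplotlib.pyplot as plt
from PIL import Image

def fig2img(fig):
    # Serialize the figure to PNG in memory and reload it as a PIL image
    buf = io.BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight")
    buf.seek(0)
    img = Image.open(buf).convert("RGB")
    plt.close(fig)
    return img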