Refactor yolov10_inference and yolov10_video_inference to take a single argument containing all inputs.
app.py CHANGED
@@ -31,7 +31,8 @@ category_dict = {
 }

 @spaces.GPU(duration=200)
-def yolov10_inference(image, model_id, image_size, conf_threshold, iou_threshold):
+def yolov10_inference(inputs):
+    image, model_id, image_size, conf_threshold, iou_threshold = inputs[1], inputs[2], inputs[3], inputs[4], inputs[5]
     model_path = download_models(model_id)
     model = YOLOv10(model_path)
     results = model(source=image, imgsz=image_size, iou=iou_threshold, conf=conf_threshold, verbose=False)[0]
@@ -45,7 +46,8 @@ def yolov10_inference(image, model_id, image_size, conf_threshold, iou_threshold

     return annotated_image

-def yolov10_video_inference(
+def yolov10_video_inference(inputs):
+    video, model_id, image_size, conf_threshold, iou_threshold = inputs[2], inputs[3], inputs[4], inputs[5], inputs[6]
     model_path = download_models(model_id)
     model = YOLOv10(model_path)

@@ -133,8 +135,14 @@ def app():
         output_image = gr.Image(type="numpy", label="Annotated Image", visible=True)
         output_video = gr.Video(label="Annotated Video", visible=False)

+        def process_inputs(inputs):
+            if inputs[0] == "Image":
+                return yolov10_inference(inputs)
+            else:
+                return yolov10_video_inference(inputs)
+
         yolov10_infer.click(
-            fn=
+            fn=process_inputs,
             inputs=[
                 image_or_video,
                 image,
@@ -186,7 +194,7 @@ def app():
                    0.45,
                ],
            ],
-            fn=
+            fn=process_inputs,
            inputs=[
                image_or_video,
                image,