import spaces
import gradio as gr
from detect_strongsort import run
import os
import threading
should_continue = True
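
# On a ZeroGPU Space, the spaces.GPU decorator requests a GPU allocation for each
# call to the wrapped function; `duration` is the expected maximum runtime in seconds.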
@spaces.GPU(duration=60)
def yolov9_inference(model_id, image_size, conf_threshold, iou_threshold, img_path=None, vid_path=None):
    global should_continue
    img_extensions = ['.jpg', '.jpeg', '.png', '.gif']  # Add more image extensions if needed
    vid_extensions = ['.mp4', '.avi', '.mov', '.mkv']  # Add more video extensions if needed

    # Use whichever input was supplied, provided its extension is recognised.
    input_path = None
    if img_path is not None:
        _, img_extension = os.path.splitext(img_path)
        if img_extension.lower() in img_extensions:
            input_path = img_path
    elif vid_path is not None:
        _, vid_extension = os.path.splitext(vid_path)
        if vid_extension.lower() in vid_extensions:
            input_path = vid_path

    output_path = run(
        yolo_weights=model_id,
        imgsz=(image_size, image_size),
        conf_thres=conf_threshold,
        iou_thres=iou_threshold,
        source=input_path,
        device='0',
        strong_sort_weights="osnet_x0_25_msmt17.pt",
        hide_conf=True,
    )

    # Assuming output_path points at the file run() produced, route it to the
    # matching component. Default both outputs to None so the return statement
    # never hits an unbound variable when the extension is not recognised.
    output_image = None
    output_video = None
    _, output_extension = os.path.splitext(output_path)
    if output_extension.lower() in img_extensions:
        output_image = output_path
    elif output_extension.lower() in vid_extensions:
        output_video = output_path
    return output_image, output_video, output_path
@spaces.GPU(duration=60)
def inference(model_id, image_size, conf_threshold, iou_threshold, img_path=None, vid_path=None):
    global should_continue
    should_continue = True
    output_image, output_video, output_path = yolov9_inference(
        model_id, image_size, conf_threshold, iou_threshold, img_path, vid_path
    )
    return output_image, output_video, output_path
def stop_processing():
    global should_continue
    should_continue = False
    return "Stop..."
def app():
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                gr.HTML("<h2>Input Parameters</h2>")
                img_path = gr.File(label="Image")
                vid_path = gr.File(label="Video")
                model_id = gr.Dropdown(
                    label="Model",
                    choices=[
                        "last_best_model.pt",
                        "best_model-converted.pt",
                    ],
                    value="last_best_model.pt",
                )
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                yolov9_infer = gr.Button(value="Inference")
                stop_button = gr.Button(value="Stop")

            with gr.Column():
                gr.HTML("<h2>Output</h2>")
                output_image = gr.Image(type="numpy", label="Output Image")
                output_video = gr.Video(label="Output Video")
                output_path = gr.Textbox(label="Output path")

        yolov9_infer.click(
            fn=inference,
            inputs=[
                model_id,
                image_size,
                conf_threshold,
                iou_threshold,
                img_path,
                vid_path,
            ],
            outputs=[output_image, output_video, output_path],
        )
        stop_button.click(stop_processing)
css = """
body {
    background-color: #f0f0f0;
}
h1 {
    color: #4CAF50;
}
"""

# Page-level CSS is passed to Blocks so it is actually applied.
gradio_app = gr.Blocks(css=css)
with gradio_app:
    gr.HTML(
        """
        <h1 style='text-align: center'>
        YOLOv9: Real-time Object Detection
        </h1>
        """
    )
    with gr.Row():
        with gr.Column():
            app()

gradio_app.launch(debug=True)