import os
import threading

import gradio as gr
from detect_strongsort import run

should_continue = True
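
# `should_continue` is a cooperative cancellation flag: stop_processing() flips it to
# False, but it can only interrupt a running job if detect_strongsort.run itself
# checks the flag; in this file it is set and reset but never consumed.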
def yolov9_inference(model_id, image_size, conf_threshold, iou_threshold, img_path=None, vid_path=None):
    global should_continue
    img_extensions = ['.jpg', '.jpeg', '.png', '.gif']  # add more image extensions if needed
    vid_extensions = ['.mp4', '.avi', '.mov', '.mkv']   # add more video extensions if needed

    # Pick whichever upload has a supported extension (image takes precedence).
    input_path = None
    if img_path is not None:
        _, img_extension = os.path.splitext(img_path)
        if img_extension.lower() in img_extensions:
            input_path = img_path
    elif vid_path is not None:
        _, vid_extension = os.path.splitext(vid_path)
        if vid_extension.lower() in vid_extensions:
            input_path = vid_path
    if input_path is None:
        raise gr.Error("Please upload a supported image or video file.")

    output_path = run(
        yolo_weights=model_id,
        imgsz=(image_size, image_size),
        conf_thres=conf_threshold,
        iou_thres=iou_threshold,
        source=input_path,
        device='cpu',
        strong_sort_weights="osnet_x0_25_msmt17.pt",
        hide_conf=True,
    )

    # Route the result to the matching output component based on its extension.
    output_image, output_video = None, None
    _, output_extension = os.path.splitext(output_path)
    if output_extension.lower() in img_extensions:
        output_image = output_path
    elif output_extension.lower() in vid_extensions:
        output_video = output_path
    return output_image, output_video, output_path
def inference(model_id, image_size, conf_threshold, iou_threshold, img_path=None, vid_path=None):
    global should_continue
    should_continue = True
    return yolov9_inference(model_id, image_size, conf_threshold, iou_threshold, img_path, vid_path)


def stop_processing():
    global should_continue
    should_continue = False
    return "Stop..."
def app():
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                gr.HTML("<h2>Input Parameters</h2>")
                # type="filepath" (Gradio 4.x) hands the handler a plain path string
                img_path = gr.File(label="Image", type="filepath")
                vid_path = gr.File(label="Video", type="filepath")
                model_id = gr.Dropdown(
                    label="Model",
                    choices=[
                        "last_best_model.pt",
                        "best_model-converted.pt",
                    ],
                    value="last_best_model.pt",
                )
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.4,
                )
                iou_threshold = gr.Slider(
                    label="IoU Threshold",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    value=0.5,
                )
                yolov9_infer = gr.Button(value="Inference")
                stop_button = gr.Button(value="Stop")

            with gr.Column():
                gr.HTML("<h2>Output</h2>")
                output_image = gr.Image(type="numpy", label="Output Image")
                output_video = gr.Video(label="Output Video")
                output_path = gr.Textbox(label="Output path")

        yolov9_infer.click(
            fn=inference,
            inputs=[
                model_id,
                image_size,
                conf_threshold,
                iou_threshold,
                img_path,
                vid_path,
            ],
            outputs=[output_image, output_video, output_path],
        )
        stop_button.click(stop_processing)
css = """
body {
    background-color: #f0f0f0;
}
h1 {
    color: #4CAF50;
}
"""

gradio_app = gr.Blocks(css=css)
with gradio_app:
    gr.HTML(
        """
        <h1 style='text-align: center'>
        YOLOv9: Real-time Object Detection
        </h1>
        """
    )
    with gr.Row():
        with gr.Column():
            app()

gradio_app.launch(debug=True)
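
# To try this locally (a sketch, assuming the file is saved as app.py and that
# gradio, the YOLOv9/StrongSORT code, and the listed weight files are available):
#
#   python app.py
#
# Gradio then prints a local URL to open in the browser.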