import os
import yaml
import numpy as np
from matplotlib import cm
import gradio as gr

import deeplabcut
import dlclibrary
import transformers
from PIL import Image
import requests

from viz_utils import save_results_as_json, draw_keypoints_on_image, draw_bbox_w_text, save_results_only_dlc
from detection_utils import predict_md, crop_animal_detections
from ui_utils import gradio_inputs_for_MD_DLC, gradio_outputs_for_MD_DLC, gradio_description_and_examples

from deeplabcut.utils import auxiliaryfunctions
from dlclibrary.dlcmodelzoo.modelzoo_download import (
    download_huggingface_model,
    MODELOPTIONS,
)
# TESTING (passes): download the SuperAnimal models
# model = 'superanimal_topviewmouse'
# train_dir = 'DLC_models/sa-tvm'
# download_huggingface_model(model, train_dir)
# grab demo data: COCO cats image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# MegaDetector model lookup
MD_models_dict = {'md_v5a': "MD_models/md_v5a.0.0.pt",
                  'md_v5b': "MD_models/md_v5b.0.0.pt"}

# DLC models target dirs
DLC_models_dict = {'superanimal_topviewmouse': "DLC_models/sa-tvm",
                   'superanimal_quadruped': "DLC_models/sa-q",
                   'full_human': "DLC_models/DLC_human_dancing/"}
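
# Optional sketch (not part of the original flow): pre-fetch all DLC models at
# startup with the same download_huggingface_model(model_name, target_dir) call
# used inside predict_pipeline, so the first user request is not blocked by a download.
# for _model_name, _target_dir in DLC_models_dict.items():
#     if not (os.path.isdir(_target_dir) and os.listdir(_target_dir)):
#         download_huggingface_model(_model_name, _target_dir)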
#####################################################
def predict_pipeline(img_input,
                     mega_model_input,
                     dlc_model_input_str,
                     flag_dlc_only,
                     flag_show_str_labels,
                     bbox_likelihood_th,
                     kpts_likelihood_th,
                     font_style,
                     font_size,
                     keypt_color,
                     marker_size,
                     ):
    if not flag_dlc_only:
        ############################################################
        # Run MegaDetector
        md_results = predict_md(img_input,
                                MD_models_dict[mega_model_input],
                                size=640)

        ################################################################
        # Obtain animal crops for bboxes with confidence above threshold
        list_crops = crop_animal_detections(img_input,
                                            md_results,
                                            bbox_likelihood_th)

    ############################################################
    # Get DLC model and label map
    # If the model directory already exists and is non-empty, do not re-download
    # (a previous execution, likely the same day, already fetched it)
    # TODO: can we ask the user whether to reload the DLC model if a directory is found?
    if os.path.isdir(DLC_models_dict[dlc_model_input_str]) and \
       len(os.listdir(DLC_models_dict[dlc_model_input_str])) > 0:
        path_to_DLCmodel = DLC_models_dict[dlc_model_input_str]
    else:
        path_to_DLCmodel = download_huggingface_model(dlc_model_input_str,
                                                      DLC_models_dict[dlc_model_input_str])

    # extract the map from label ids to joint name strings
    pose_cfg_path = os.path.join(DLC_models_dict[dlc_model_input_str],
                                 'pose_cfg.yaml')
    with open(pose_cfg_path, "r") as stream:
        pose_cfg_dict = yaml.safe_load(stream)
    # pose_cfg_dict['all_joints'] is a list of one-element lists
    map_label_id_to_str = dict(zip([el[0] for el in pose_cfg_dict['all_joints']],
                                   pose_cfg_dict['all_joints_names']))
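    # Illustrative example (joint names depend on the selected model's pose_cfg.yaml):
    #   all_joints:       [[0], [1], [2], ...]
    #   all_joints_names: ['snout', 'leftear', 'rightear', ...]
    # would give map_label_id_to_str == {0: 'snout', 1: 'leftear', 2: 'rightear', ...}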
#########################################################
# Define user interface and launch
inputs = gradio_inputs_for_MD_DLC(list(MD_models_dict.keys()),
                                  list(DLC_models_dict.keys()))
outputs = gradio_outputs_for_MD_DLC()

[gr_title,
 gr_description,
 examples] = gradio_description_and_examples()

# launch
demo = gr.Interface(predict_pipeline,
                    inputs=inputs,
                    outputs=outputs,
                    title=gr_title,
                    description=gr_description,
                    examples=examples,
                    theme="huggingface")

demo.launch(enable_queue=True, share=True)
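
# Note: gr.Interface passes the component values to the function positionally,
# so the list returned by gradio_inputs_for_MD_DLC must contain one component
# per argument of predict_pipeline, in the same order as the function signature.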