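"""Handle new model submissions to the leaderboard evaluation queue."""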
import json
import os
from datetime import datetime, timezone
from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import already_submitted_models
from src.about import Training_Dataset
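
# Lazily populated cache of model submissions already present in the eval queue.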
REQUESTED_MODELS = None


def add_new_eval(
    model_name: str | None = None,
    model_link: str | None = None,
    model_backbone: str = "Unknown",
    precision: str | None = None,
    model_parameters: float = 0,
    paper_name: str = "None",
    paper_link: str = "None",
    training_dataset: str = "",
    testing_type: str | None = None,
    cmap_value: float = 0,
    auroc_value: float = 0,
    t1acc_value: float = 0,
):
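    """Validate a leaderboard submission and queue it for evaluation.

    Writes the request as a JSON file under EVAL_REQUESTS_PATH, uploads it to
    the QUEUE_REPO dataset on the Hub, and returns a styled status message.
    """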
    global REQUESTED_MODELS
    if not REQUESTED_MODELS:
        REQUESTED_MODELS = already_submitted_models(EVAL_REQUESTS_PATH)

    if model_name is None or model_name == "":
        return styled_error("Please enter a model name")

    if model_link is None or model_link == "":
        return styled_error("Please provide a link to your model")

    # Guard the .lower() call below; testing_type has no usable default.
    if testing_type is None or testing_type == "":
        return styled_error("Please select a testing type")

    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Map the free-form dataset name onto a known Training_Dataset member;
    # unrecognized values fall through to Other and keep the user's text.
    training_dataset_type = Training_Dataset.from_str(training_dataset)
    if training_dataset_type.name != Training_Dataset.Other.name:
        training_dataset = training_dataset_type.name

    # Replace whitespace with underscores so the values are filename-safe.
    training_dataset = "_".join(training_dataset.split())
    model_name = "_".join(model_name.split())
    testing_type = testing_type.lower()

    print("Adding new eval")
    eval_entry = {
        "model_name": model_name,
        "model_link": model_link,
        "model_backbone": model_backbone,
        "precision": precision,
        "model_parameters": model_parameters,
        "paper_name": paper_name,
        "paper_link": paper_link,
        "status": "PENDING",
        "submitted_time": current_time,
        "training_dataset": training_dataset,
        "testing_type": testing_type,
        "claimed_cmap": cmap_value,
        "claimed_auroc": auroc_value,
        "claimed_t1acc": t1acc_value,
    }
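
    # Reject resubmission of the same model/dataset/testing-type/precision combination.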
if f"{model_name}_{training_dataset}_{testing_type}_{precision}" in REQUESTED_MODELS:
return styled_warning("This model has been already submitted.")
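
    # Serialize the request to a local file before pushing it to the Hub.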
print("Creating eval file")
try:
OUT_DIR = f"{EVAL_REQUESTS_PATH}/{model_name}"
os.makedirs(OUT_DIR, exist_ok=True)
out_path = f"{OUT_DIR}/{model_name}_eval_request_{precision}_{training_dataset}_{testing_type}.json"
with open(out_path, "w") as f:
f.write(json.dumps(eval_entry))
except:
return styled_error("There was an error while creating your request. Make sure there are no \"/\" in your model name.")
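
    # Push the request file into the queue dataset repo; path_in_repo keeps
    # only the part of the path relative to the local eval-queue checkout.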
print("Uploading eval file")
try:
API.upload_file(
path_or_fileobj=out_path,
path_in_repo=out_path.split("eval-queue/")[1],
repo_id=QUEUE_REPO,
repo_type="dataset",
commit_message=f"Add {model_name}_{training_dataset}_{testing_type} to eval queue",
)
except:
return styled_error("There was an error while uploading your request.")
    # Remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\n"
        "Please wait for up to an hour for the model to show in the PENDING list."
    )
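

# A minimal usage sketch, assuming the module is run directly as a smoke test.
# Every value below is hypothetical; running this for real would create a
# request file locally and upload it to QUEUE_REPO.
if __name__ == "__main__":
    result = add_new_eval(
        model_name="demo-model",  # hypothetical model
        model_link="https://huggingface.co/org/demo-model",  # hypothetical link
        model_backbone="transformer",
        precision="float32",
        model_parameters=86.0,
        training_dataset="Other",
        testing_type="zero-shot",
        cmap_value=0.41,
        auroc_value=0.91,
        t1acc_value=0.55,
    )
    print(result)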