import json
import os
from datetime import datetime, timezone

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from src.submission.check_validity import already_submitted_models
from src.about import Training_Dataset

REQUESTED_MODELS = None

def add_new_eval(
    model_name: str = None,
    model_link: str = None,
    model_backbone: str = "Unknown",
    precision: str = None,
    model_parameters: float = 0,
    paper_name: str = "None",
    paper_link: str = "None",
    training_dataset: str = "",
    testing_type: str = None,
    cmap_value: float = 0,
    auroc_value: float = 0,
    t1acc_value: float = 0,
):
    """Validate a submission and push an eval request JSON file to the queue repo."""
    global REQUESTED_MODELS
    if not REQUESTED_MODELS:
        REQUESTED_MODELS = already_submitted_models(EVAL_REQUESTS_PATH)

    if model_name is None or model_name == "":
        return styled_error("Please enter a model name")
    if model_link is None or model_link == "":
        return styled_error("Please provide a link to your model")

    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Normalize the training dataset to its canonical enum name, unless it is "Other".
    training_dataset_type = Training_Dataset.from_str(training_dataset)
    if training_dataset_type.name != Training_Dataset.Other.name:
        training_dataset = training_dataset_type.name
    training_dataset = "_".join(training_dataset.split())
    model_name = "_".join(model_name.split())
    testing_type = testing_type.lower()

    print("Adding new eval")
    eval_entry = {
        "model_name": model_name,
        "model_link": model_link,
        "model_backbone": model_backbone,
        "precision": precision,
        "model_parameters": model_parameters,
        "paper_name": paper_name,
        "paper_link": paper_link,
        "status": "PENDING",
        "submitted_time": current_time,
        "training_dataset": training_dataset,
        "testing_type": testing_type,
        "claimed_cmap": cmap_value,
        "claimed_auroc": auroc_value,
        "claimed_t1acc": t1acc_value,
    }

    # Reject duplicates: each model/dataset/testing-type/precision combination may only be queued once.
    if f"{model_name}_{training_dataset}_{testing_type}_{precision}" in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted.")

    print("Creating eval file")
    try:
        out_dir = f"{EVAL_REQUESTS_PATH}/{model_name}"
        os.makedirs(out_dir, exist_ok=True)
        out_path = f"{out_dir}/{model_name}_eval_request_{precision}_{training_dataset}_{testing_type}.json"
        with open(out_path, "w") as f:
            f.write(json.dumps(eval_entry))
    except Exception:
        return styled_error('There was an error while creating your request. Make sure there is no "/" in your model name.')

    print("Uploading eval file")
    try:
        API.upload_file(
            path_or_fileobj=out_path,
            path_in_repo=out_path.split("eval-queue/")[1],
            repo_id=QUEUE_REPO,
            repo_type="dataset",
            commit_message=f"Add {model_name}_{training_dataset}_{testing_type} to eval queue",
        )
    except Exception:
        return styled_error("There was an error while uploading your request.")

    # Remove the local copy once the request has been uploaded to the queue repo.
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\n"
        "Please wait for up to an hour for the model to show in the PENDING list."
    )
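

# Minimal local usage sketch (illustrative only, not part of the Space's runtime path).
# It assumes the src.* modules above are importable and that EVAL_REQUESTS_PATH points at a
# local clone of the eval-queue dataset; all argument values below are hypothetical.
if __name__ == "__main__":
    result = add_new_eval(
        model_name="my-model",  # hypothetical model name
        model_link="https://huggingface.co/user/my-model",  # hypothetical model page
        model_backbone="ResNet-50",  # hypothetical backbone
        precision="float32",
        model_parameters=25.6,
        training_dataset="Other",
        testing_type="zero-shot",  # hypothetical testing type
        cmap_value=0.42,
        auroc_value=0.87,
        t1acc_value=0.65,
    )
    print(result)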