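"""Gradio app for the STaRK (Semi-structured Retrieval Benchmark) leaderboard:
displays baseline results, evaluates submitted prediction CSVs, and stores
approved submissions on the HuggingFace Hub."""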
import gradio as gr
import pandas as pd
import numpy as np
import os
import re
from datetime import datetime
import json
import torch
from tqdm import tqdm
import ast  # safe parsing of serialized pred_rank lists
from concurrent.futures import ThreadPoolExecutor, as_completed
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from huggingface_hub import HfApi
import shutil
import tempfile
import time
from threading import Lock
from stark_qa import load_qa
from stark_qa.evaluator import Evaluator
from utils.hub_storage import HubStorage
from utils.token_handler import TokenHandler
# Initialize storage once at startup
try:
REPO_ID = "snap-stanford/stark-leaderboard" # Replace with your space name
hub_storage = HubStorage(REPO_ID)
except Exception as e:
raise RuntimeError(f"Failed to initialize HuggingFace Hub storage: {e}")
# Global lock for thread-safe operations
result_lock = Lock()
def process_single_instance(args):
idx, eval_dict, qa_dataset, evaluator, eval_metrics = args
query, query_id, answer_ids, meta_info = qa_dataset[idx]
try:
# Access prediction using dictionary instead of DataFrame
        if query_id not in eval_dict:
            raise KeyError(f'query_id={query_id} not found in the submitted predictions; please make sure predicted results exist for every query.')
pred_rank = eval_dict[query_id]
        if isinstance(pred_rank, str):
            try:
                # Parse the serialized list safely (avoids arbitrary code execution via eval)
                pred_rank = ast.literal_eval(pred_rank)
            except (ValueError, SyntaxError) as e:
                raise ValueError(f'Failed to parse pred_rank as a list for query_id={query_id}: {e}')
if not isinstance(pred_rank, list):
raise TypeError(f'Error when processing query_id={query_id}, expected pred_rank to be a list but got {type(pred_rank)}.')
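        # Score the top-100 candidates by negated rank so rank 0 receives the highest score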
pred_dict = {pred_rank[i]: -i for i in range(min(100, len(pred_rank)))}
answer_ids = torch.LongTensor(answer_ids)
# Evaluate metrics
result = evaluator.evaluate(pred_dict, answer_ids, metrics=eval_metrics)
result["idx"], result["query_id"] = idx, query_id
return result
except Exception as e:
raise RuntimeError(f'Error processing query_id={query_id}: {str(e)}')
def compute_metrics(csv_path: str, dataset: str, split: str, num_threads: int = 4):
    candidate_ids_dict = {
        'amazon': list(range(957192)),
        'mag': list(range(1172724, 1872968)),
        'prime': list(range(129375))
    }
try:
# Read and validate CSV
eval_csv = pd.read_csv(csv_path)
if 'query_id' not in eval_csv.columns:
raise ValueError('No `query_id` column found in the submitted csv.')
if 'pred_rank' not in eval_csv.columns:
raise ValueError('No `pred_rank` column found in the submitted csv.')
# Convert DataFrame to dictionary for thread-safe access
eval_dict = dict(zip(eval_csv['query_id'], eval_csv['pred_rank']))
# Validate input parameters
if dataset not in candidate_ids_dict:
raise ValueError(f"Invalid dataset '{dataset}', expected one of {list(candidate_ids_dict.keys())}.")
if split not in ['test', 'test-0.1', 'human_generated_eval']:
raise ValueError(f"Invalid split '{split}', expected one of ['test', 'test-0.1', 'human_generated_eval'].")
# Initialize evaluator and metrics
evaluator = Evaluator(candidate_ids_dict[dataset])
eval_metrics = ['hit@1', 'hit@5', 'recall@20', 'mrr']
# Load dataset and get split indices
qa_dataset = load_qa(dataset, human_generated_eval=split == 'human_generated_eval')
split_idx = qa_dataset.get_idx_split()
all_indices = split_idx[split].tolist()
# Thread-safe containers for results
results_list = []
results_lock = Lock()
# Prepare args for each thread
args = [(idx, eval_dict, qa_dataset, evaluator, eval_metrics) for idx in all_indices]
failed_queries = [] # Track failed queries
# Process using threads
with ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = [executor.submit(process_single_instance, arg) for arg in args]
for future in tqdm(as_completed(futures), total=len(futures)):
try:
result = future.result()
with results_lock:
results_list.append(result)
except Exception as e:
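                    # Best-effort: recover the query_id embedded in the error message text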
query_id = str(e).split('query_id=')[-1].split(':')[0]
failed_queries.append(query_id)
print(f"Error processing instance: {str(e)}")
if failed_queries:
print(f"\nFailed to process {len(failed_queries)} queries.")
print(f"First few failed query_ids: {failed_queries[:5]}")
if not results_list:
raise ValueError("No results were successfully processed")
# Compute final metrics
results_df = pd.DataFrame(results_list)
final_results = {
metric: np.mean(results_df[metric]) for metric in eval_metrics
}
return final_results
except pd.errors.EmptyDataError:
return "Error: The CSV file is empty or could not be read. Please check the file and try again."
except FileNotFoundError:
return f"Error: The file {csv_path} could not be found. Please check the file path and try again."
except Exception as error:
return f"{error}"
# Data dictionaries for leaderboard
data_synthesized_full = {
'Method': ['BM25', 'DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)', 'ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b', 'multi-ada-002', 'ColBERTv2'],
'STARK-AMAZON_Hit@1': [44.94, 15.29, 30.96, 26.56, 39.16, 40.93, 21.74, 42.08, 40.07, 46.10],
'STARK-AMAZON_Hit@5': [67.42, 47.93, 51.06, 50.01, 62.73, 64.37, 41.65, 66.87, 64.98, 66.02],
'STARK-AMAZON_R@20': [53.77, 44.49, 41.95, 52.05, 53.29, 54.28, 33.22, 56.52, 55.12, 53.44],
'STARK-AMAZON_MRR': [55.30, 30.20, 40.66, 37.75, 50.35, 51.60, 31.47, 53.46, 51.55, 55.51],
'STARK-MAG_Hit@1': [25.85, 10.51, 21.96, 12.88, 29.08, 30.06, 18.01, 37.90, 25.92, 31.18],
'STARK-MAG_Hit@5': [45.25, 35.23, 36.50, 39.01, 49.61, 50.58, 34.85, 56.74, 50.43, 46.42],
'STARK-MAG_R@20': [45.69, 42.11, 35.32, 46.97, 48.36, 50.49, 35.46, 46.40, 50.80, 43.94],
'STARK-MAG_MRR': [34.91, 21.34, 29.14, 29.12, 38.62, 39.66, 26.10, 47.25, 36.94, 38.39],
'STARK-PRIME_Hit@1': [12.75, 4.46, 6.53, 8.85, 12.63, 10.85, 10.10, 15.57, 15.10, 11.75],
'STARK-PRIME_Hit@5': [27.92, 21.85, 15.67, 21.35, 31.49, 30.23, 22.49, 33.42, 33.56, 23.85],
'STARK-PRIME_R@20': [31.25, 30.13, 16.52, 29.63, 36.00, 37.83, 26.34, 39.09, 38.05, 25.04],
'STARK-PRIME_MRR': [19.84, 12.38, 11.05, 14.73, 21.41, 19.99, 16.12, 24.11, 23.49, 17.39]
}
data_synthesized_10 = {
'Method': ['BM25', 'DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)', 'ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b', 'multi-ada-002', 'ColBERTv2', 'Claude3 Reranker', 'GPT4 Reranker'],
'STARK-AMAZON_Hit@1': [42.68, 16.46, 30.09, 25.00, 39.02, 43.29, 18.90, 43.29, 40.85, 44.31, 45.49, 44.79],
'STARK-AMAZON_Hit@5': [67.07, 50.00, 49.27, 48.17, 64.02, 67.68, 37.80, 71.34, 62.80, 65.24, 71.13, 71.17],
'STARK-AMAZON_R@20': [54.48, 42.15, 41.91, 51.65, 49.30, 56.04, 34.73, 56.14, 52.47, 51.00, 53.77, 55.35],
'STARK-AMAZON_MRR': [54.02, 30.20, 39.30, 36.87, 50.32, 54.20, 28.76, 55.07, 51.54, 55.07, 55.91, 55.69],
'STARK-MAG_Hit@1': [27.81, 11.65, 22.89, 12.03, 28.20, 34.59, 19.17, 38.35, 25.56, 31.58, 36.54, 40.90],
'STARK-MAG_Hit@5': [45.48, 36.84, 37.26, 37.97, 52.63, 50.75, 33.46, 58.64, 50.37, 47.36, 53.17, 58.18],
'STARK-MAG_R@20': [44.59, 42.30, 44.16, 47.98, 49.25, 50.75, 29.85, 46.38, 53.03, 45.72, 48.36, 48.60],
'STARK-MAG_MRR': [35.97, 21.82, 30.00, 28.70, 38.55, 42.90, 26.06, 48.25, 36.82, 38.98, 44.15, 49.00],
'STARK-PRIME_Hit@1': [13.93, 5.00, 6.78, 7.14, 15.36, 12.14, 9.29, 16.79, 15.36, 15.00, 17.79, 18.28],
'STARK-PRIME_Hit@5': [31.07, 23.57, 16.15, 17.14, 31.07, 31.42, 20.7, 34.29, 32.86, 26.07, 36.90, 37.28],
'STARK-PRIME_R@20': [32.84, 30.50, 17.07, 32.95, 37.88, 37.34, 25.54, 41.11, 40.99, 27.78, 35.57, 34.05],
'STARK-PRIME_MRR': [21.68, 13.50, 11.42, 16.27, 23.50, 21.23, 15.00, 24.99, 23.70, 19.98, 26.27, 26.55]
}
data_human_generated = {
'Method': ['BM25', 'DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)', 'ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b', 'multi-ada-002', 'ColBERTv2', 'Claude3 Reranker', 'GPT4 Reranker'],
'STARK-AMAZON_Hit@1': [27.16, 16.05, 25.93, 22.22, 39.50, 35.80, 29.63, 40.74, 46.91, 33.33, 53.09, 50.62],
'STARK-AMAZON_Hit@5': [51.85, 39.51, 54.32, 49.38, 64.19, 62.96, 46.91, 71.60, 72.84, 55.56, 74.07, 75.31],
'STARK-AMAZON_R@20': [29.23, 15.23, 23.69, 21.54, 35.46, 33.01, 21.21, 36.30, 40.22, 29.03, 35.46, 35.46],
'STARK-AMAZON_MRR': [18.79, 27.21, 37.12, 31.33, 52.65, 47.84, 38.61, 53.21, 58.74, 43.77, 62.11, 61.06],
'STARK-MAG_Hit@1': [32.14, 4.72, 25.00, 20.24, 28.57, 22.62, 16.67, 34.52, 23.81, 33.33, 38.10, 36.90],
'STARK-MAG_Hit@5': [41.67, 9.52, 30.95, 26.19, 41.67, 36.90, 28.57, 44.04, 41.67, 36.90, 45.24, 46.43],
'STARK-MAG_R@20': [32.46, 25.00, 27.24, 28.76, 35.95, 32.44, 21.74, 34.57, 39.85, 30.50, 35.95, 35.95],
'STARK-MAG_MRR': [37.42, 7.90, 27.98, 25.53, 35.81, 29.68, 21.59, 38.72, 31.43, 35.97, 42.00, 40.65],
'STARK-PRIME_Hit@1': [22.45, 2.04, 7.14, 6.12, 17.35, 16.33, 9.18, 25.51, 24.49, 15.31, 28.57, 28.57],
'STARK-PRIME_Hit@5': [41.84, 9.18, 13.27, 13.27, 34.69, 32.65, 21.43, 41.84, 39.80, 26.53, 46.94, 44.90],
'STARK-PRIME_R@20': [42.32, 10.69, 11.72, 17.62, 41.09, 39.01, 26.77, 48.10, 47.21, 25.56, 41.61, 41.61],
'STARK-PRIME_MRR': [30.37, 7.05, 10.07, 9.39, 26.35, 24.33, 15.24, 34.28, 32.98, 19.67, 36.32, 34.82]
}
# Initialize DataFrames
df_synthesized_full = pd.DataFrame(data_synthesized_full)
df_synthesized_10 = pd.DataFrame(data_synthesized_10)
df_human_generated = pd.DataFrame(data_human_generated)
# Model type definitions
model_types = {
'Sparse Retriever': ['BM25'],
'Small Dense Retrievers': ['DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)'],
'LLM-based Dense Retrievers': ['ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b'],
'Multivector Retrievers': ['multi-ada-002', 'ColBERTv2'],
'LLM Rerankers': ['Claude3 Reranker', 'GPT4 Reranker'],
'Others': [] # Will be populated dynamically with submitted models
}
# Submission form validation functions
def validate_email(email_str):
"""Validate email format(s)"""
emails = [e.strip() for e in email_str.split(';')]
email_pattern = re.compile(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$')
return all(email_pattern.match(email) for email in emails)
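# e.g., validate_email("first@cs.stanford.edu; second@example.org") -> True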
def validate_github_url(url):
"""Validate GitHub URL format"""
github_pattern = re.compile(
r'^https?:\/\/(?:www\.)?github\.com\/[\w-]+\/[\w.-]+\/?$'
)
return bool(github_pattern.match(url))
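# e.g., validate_github_url("https://github.com/snap-stanford/stark-leaderboard") -> True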
def validate_csv(file_path):
"""Validate CSV file format and content with better error handling"""
try:
df = pd.read_csv(file_path)
required_cols = ['query_id', 'pred_rank']
# Check for required columns
missing_cols = [col for col in required_cols if col not in df.columns]
if missing_cols:
return False, f"Missing required columns: {', '.join(missing_cols)}"
# Validate first few rows to ensure proper format
for idx, row in df.head().iterrows():
try:
                rank_list = ast.literal_eval(row['pred_rank']) if isinstance(row['pred_rank'], str) else row['pred_rank']
if not isinstance(rank_list, list):
return False, f"pred_rank must be a list (row {idx})"
if len(rank_list) < 20:
return False, f"pred_rank must contain at least 20 candidates (row {idx})"
except Exception as e:
return False, f"Invalid pred_rank format in row {idx}: {str(e)}"
return True, "Valid CSV file"
except pd.errors.EmptyDataError:
return False, "CSV file is empty"
except Exception as e:
return False, f"Error processing CSV: {str(e)}"
def sanitize_name(name):
"""Sanitize name for file system use"""
return re.sub(r'[^a-zA-Z0-9]', '_', name)
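# e.g., sanitize_name("My Model v1.0") -> "My_Model_v1_0"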
def read_json_from_hub(api: HfApi, repo_id: str, file_path: str) -> dict:
"""
Read and parse JSON file from HuggingFace Hub.
Args:
api: HuggingFace API instance
repo_id: Repository ID
file_path: Path to file in repository
Returns:
dict: Parsed JSON content
"""
try:
        # Download the file into the local HF cache and get its path
content = api.hf_hub_download(
repo_id=repo_id,
filename=file_path,
repo_type="space"
)
# Read and parse JSON
with open(content, 'r') as f:
return json.load(f)
except Exception as e:
print(f"Error reading JSON file {file_path}: {str(e)}")
return None
def scan_submissions_directory():
"""
Scans the submissions directory and updates the model types dictionary
with submitted models.
"""
try:
# Initialize HuggingFace API
api = HfApi()
# Track submissions for each split
submissions_by_split = {
'test': [],
'test-0.1': [],
'human_generated_eval': []
}
# Get all files from repository
try:
all_files = api.list_repo_files(
repo_id=REPO_ID,
repo_type="space"
)
# Filter for files in submissions directory
repo_files = [f for f in all_files if f.startswith('submissions/')]
except Exception as e:
print(f"Error listing repository contents: {str(e)}")
return submissions_by_split
# Group files by team folders
folder_files = {}
for filepath in repo_files:
parts = filepath.split('/')
if len(parts) < 3: # Need at least submissions/team_folder/file
continue
folder_name = parts[1] # team_folder name
if folder_name not in folder_files:
folder_files[folder_name] = []
folder_files[folder_name].append(filepath)
# Process each team folder
for folder_name, files in folder_files.items():
try:
# Find latest.json in this folder
latest_file = next((f for f in files if f.endswith('latest.json')), None)
if not latest_file:
print(f"No latest.json found in {folder_name}")
continue
# Read latest.json
latest_info = read_json_from_hub(api, REPO_ID, latest_file)
if not latest_info:
print(f"Failed to read latest.json for {folder_name}")
continue
timestamp = latest_info.get('latest_submission')
if not timestamp:
print(f"No timestamp found in latest.json for {folder_name}")
continue
# Find metadata file for latest submission
metadata_file = next(
(f for f in files if f.endswith(f'metadata_{timestamp}.json')),
None
)
if not metadata_file:
print(f"No matching metadata file found for {folder_name} timestamp {timestamp}")
continue
# Read metadata file
submission_data = read_json_from_hub(api, REPO_ID, metadata_file)
if not submission_data:
print(f"Failed to read metadata for {folder_name}")
continue
if latest_info.get('status') != 'approved':
print(f"Skipping unapproved submission in {folder_name}")
continue
# Add to submissions by split
split = submission_data.get('Split')
if split in submissions_by_split:
submissions_by_split[split].append(submission_data)
# Update model types if necessary
method_name = submission_data.get('Method Name')
model_type = submission_data.get('Model Type', 'Others')
# Add to model type if it's a new method
method_exists = any(method_name in methods for methods in model_types.values())
if not method_exists and model_type in model_types:
model_types[model_type].append(method_name)
except Exception as e:
print(f"Error processing folder {folder_name}: {str(e)}")
continue
return submissions_by_split
except Exception as e:
print(f"Error scanning submissions directory: {str(e)}")
return None
def initialize_leaderboard():
"""
Initialize the leaderboard with baseline results and submitted results.
"""
global df_synthesized_full, df_synthesized_10, df_human_generated
try:
# First, initialize with baseline results
df_synthesized_full = pd.DataFrame(data_synthesized_full)
df_synthesized_10 = pd.DataFrame(data_synthesized_10)
df_human_generated = pd.DataFrame(data_human_generated)
print("Initialized with baseline results")
# Then scan and add submitted results
submissions = scan_submissions_directory()
if submissions:
            for split, split_submissions in submissions.items():
                for submission in split_submissions:
                    if submission.get('results'):  # Make sure we have results
                        # Reuse the same row-update logic as live submissions
                        update_leaderboard_data(submission)
print("Leaderboard initialization complete")
except Exception as e:
print(f"Error initializing leaderboard: {str(e)}")
def get_file_content(file_path):
"""
Helper function to safely read file content from HuggingFace repository
"""
try:
api = HfApi()
content_path = api.hf_hub_download(
repo_id=REPO_ID,
filename=file_path,
repo_type="space"
)
with open(content_path, 'r') as f:
return f.read()
except Exception as e:
print(f"Error reading file {file_path}: {str(e)}")
return None
def save_submission(submission_data, csv_file):
"""
Save submission data and CSV file using model_name_team_name format
Args:
submission_data (dict): Metadata and results for the submission
csv_file: The uploaded CSV file object
"""
# Create folder name from model name and team name
model_name_clean = sanitize_name(submission_data['Method Name'])
team_name_clean = sanitize_name(submission_data['Team Name'])
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# Create folder name: model_name_team_name
folder_name = f"{model_name_clean}_{team_name_clean}"
submission_id = f"{folder_name}_{timestamp}"
# Create submission directory structure
base_dir = "submissions"
submission_dir = os.path.join(base_dir, folder_name)
os.makedirs(submission_dir, exist_ok=True)
# Save CSV file with timestamp to allow multiple submissions
csv_filename = f"predictions_{timestamp}.csv"
csv_path = os.path.join(submission_dir, csv_filename)
if hasattr(csv_file, 'name'):
with open(csv_file.name, 'rb') as source, open(csv_path, 'wb') as target:
target.write(source.read())
# Add file paths to submission data
submission_data.update({
"csv_path": csv_path,
"submission_id": submission_id,
"folder_name": folder_name
})
# Save metadata as JSON with timestamp
metadata_path = os.path.join(submission_dir, f"metadata_{timestamp}.json")
with open(metadata_path, 'w') as f:
json.dump(submission_data, f, indent=4)
# Update latest.json to track most recent submission
latest_path = os.path.join(submission_dir, "latest.json")
with open(latest_path, 'w') as f:
json.dump({
"latest_submission": timestamp,
"status": "pending_review",
"method_name": submission_data['Method Name']
}, f, indent=4)
return submission_id
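# Resulting local layout (hypothetical names):
#   submissions/MyModel_MyTeam/predictions_20240101_120000.csv
#   submissions/MyModel_MyTeam/metadata_20240101_120000.json
#   submissions/MyModel_MyTeam/latest.json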
def update_leaderboard_data(submission_data):
"""
Update leaderboard data with new submission results
Only updates the specific dataset submitted, preventing empty rows
"""
global df_synthesized_full, df_synthesized_10, df_human_generated
# Determine which DataFrame to update based on split
split_to_df = {
'test': df_synthesized_full,
'test-0.1': df_synthesized_10,
'human_generated_eval': df_human_generated
}
df_to_update = split_to_df[submission_data['Split']]
dataset = submission_data['Dataset'].upper()
# Prepare new row data with only the relevant dataset columns
new_row = {
'Method': submission_data['Method Name']
}
# Only add metrics for the submitted dataset
new_row.update({
f'STARK-{dataset}_Hit@1': submission_data['results']['hit@1'],
f'STARK-{dataset}_Hit@5': submission_data['results']['hit@5'],
f'STARK-{dataset}_R@20': submission_data['results']['recall@20'],
f'STARK-{dataset}_MRR': submission_data['results']['mrr']
})
# Check if method already exists
method_mask = df_to_update['Method'] == submission_data['Method Name']
if method_mask.any():
# Update only the columns for the submitted dataset
for col in new_row:
df_to_update.loc[method_mask, col] = new_row[col]
else:
# For new methods, create a row with only the submitted dataset's values
df_to_update.loc[len(df_to_update)] = new_row
# Function to get emails from meta_data
def get_emails_from_metadata(meta_data):
"""
Extracts emails from the meta_data dictionary.
Args:
meta_data (dict): The metadata dictionary that contains the 'Contact Email(s)' field.
Returns:
list: A list of email addresses.
"""
return [email.strip() for email in meta_data.get("Contact Email(s)", "").split(";")]
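# e.g., {"Contact Email(s)": "a@x.edu; b@y.org"} -> ["a@x.edu", "b@y.org"]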
# Function to format meta_data as an HTML table (without Prediction CSV)
def format_metadata_as_table(meta_data):
"""
Formats metadata dictionary into an HTML table for the email.
Handles multiple contact emails separated by a semicolon.
Args:
meta_data (dict): Dictionary containing submission metadata.
Returns:
str: HTML string representing the metadata table.
"""
table_rows = ""
for key, value in meta_data.items():
if key == "Contact Email(s)":
# Ensure that contact emails are split by semicolon
emails = value.split(';')
formatted_emails = "; ".join([email.strip() for email in emails])
table_rows += f"<tr><td><b>{key}</b></td><td>{formatted_emails}</td></tr>"
elif key != "Prediction CSV": # Exclude the Prediction CSV field
table_rows += f"<tr><td><b>{key}</b></td><td>{value}</td></tr>"
table_html = f"""
<table border="1" cellpadding="5" cellspacing="0">
{table_rows}
</table>
"""
return table_html
def format_evaluation_results(results):
"""
Formats the evaluation results dictionary into a readable string.
Args:
results (dict): Dictionary containing evaluation metrics and their values.
Returns:
str: Formatted string of evaluation results.
"""
result_lines = [f"{metric}: {value}" for metric, value in results.items()]
return "\n".join(result_lines)
def get_model_type_for_method(method_name):
"""
Find the model type category for a given method name.
Returns 'Others' if not found in predefined categories.
"""
for type_name, methods in model_types.items():
if method_name in methods:
return type_name
return 'Others'
def validate_model_type(method_name, selected_type):
"""
Validate if the selected model type is appropriate for the method name.
Returns (is_valid, message).
"""
# Check if method exists in any category
existing_type = None
for type_name, methods in model_types.items():
if method_name in methods:
existing_type = type_name
break
# If method exists, it must be submitted under its predefined category
if existing_type:
if existing_type != selected_type:
return False, f"This method name is already registered under '{existing_type}'. Please use the correct category."
return True, "Valid model type"
# For new methods, any category is valid
return True, "Valid model type"
def process_submission(
method_name, team_name, dataset, split, contact_email,
code_repo, csv_file, model_description, hardware, paper_link, model_type
):
"""Process submission with progress updates"""
temp_files = []
start_time = time.time()
try:
# 1. Initial validation
yield "Validating submission details..."
        if not all([method_name, team_name, dataset, split, contact_email, code_repo, csv_file, model_type]):
            # In a generator, a bare `return value` never reaches the UI; yield the message instead
            yield "Error: Please fill in all required fields"
            return
# 2. Process CSV
yield "Processing CSV file..."
temp_csv_path = None
if isinstance(csv_file, str):
temp_csv_path = csv_file
else:
try:
temp_fd, temp_csv_path = tempfile.mkstemp(suffix='.csv')
temp_files.append(temp_csv_path)
os.close(temp_fd)
shutil.copy2(csv_file.name, temp_csv_path)
            except Exception as e:
                yield f"Error processing CSV file: {str(e)}"
                return
        # 3. Validate CSV format via the validate_csv helper defined above
        yield "Validating CSV format..."
        is_valid, message = validate_csv(temp_csv_path)
        if not is_valid:
            yield f"Error: {message}"
            return
# 4. Compute metrics with reduced workers
yield f"Computing metrics for {dataset}..."
results = compute_metrics(
csv_path=temp_csv_path,
dataset=dataset.lower(),
split=split,
num_threads=4
)
        if isinstance(results, str):
            yield f"Evaluation error: {results}"
            return
# 5. Process results
yield "Processing results..."
processed_results = {
"hit@1": round(results['hit@1'] * 100, 2),
"hit@5": round(results['hit@5'] * 100, 2),
"recall@20": round(results['recall@20'] * 100, 2),
"mrr": round(results['mrr'] * 100, 2)
}
# 6. Save submission
yield "Saving submission..."
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
folder_name = f"{sanitize_name(method_name)}_{sanitize_name(team_name)}"
submission_data = {
"Method Name": method_name,
"Team Name": team_name,
"Dataset": dataset,
"Split": split,
"Contact Email(s)": contact_email,
"Code Repository": code_repo,
"Model Description": model_description,
"Hardware": hardware,
"Paper link": paper_link,
"Model Type": model_type,
"results": processed_results
}
try:
# Save to HuggingFace Hub
csv_path_in_repo = f"submissions/{folder_name}/predictions_{timestamp}.csv"
hub_storage.save_to_hub(
file_content=temp_csv_path,
path_in_repo=csv_path_in_repo,
commit_message=f"Add submission: {method_name}"
)
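            # Also persist the metadata so scan_submissions_directory() can rebuild the
            # leaderboard on restart (a sketch, assuming save_to_hub accepts any local
            # file path, as with the CSV above)
            meta_fd, temp_meta_path = tempfile.mkstemp(suffix='.json')
            temp_files.append(temp_meta_path)
            with os.fdopen(meta_fd, 'w') as f:
                json.dump(submission_data, f, indent=4)
            hub_storage.save_to_hub(
                file_content=temp_meta_path,
                path_in_repo=f"submissions/{folder_name}/metadata_{timestamp}.json",
                commit_message=f"Add metadata: {method_name}"
            )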
        except Exception as e:
            yield f"Error saving to HuggingFace Hub: {str(e)}"
            return
# 7. Update leaderboard
yield "Updating leaderboard..."
update_leaderboard_data(submission_data)
return f"""
Submission successful!
Evaluation Results:
Hit@1: {processed_results['hit@1']:.2f}%
Hit@5: {processed_results['hit@5']:.2f}%
Recall@20: {processed_results['recall@20']:.2f}%
MRR: {processed_results['mrr']:.2f}%
Your submission will appear in the leaderboard after review.
"""
    except Exception as e:
        total_time = time.time() - start_time
        yield f"Error ({total_time:.1f}s): {str(e)}"
finally:
for temp_file in temp_files:
try:
if os.path.exists(temp_file):
os.unlink(temp_file)
except Exception as e:
print(f"Warning: Failed to delete {temp_file}: {str(e)}")
def filter_by_model_type(df, selected_types):
"""
Filter DataFrame by selected model types, including submitted models.
"""
if not selected_types:
return df.head(0)
# Get all models from selected types
selected_models = []
for type_name in selected_types:
selected_models.extend(model_types[type_name])
# Filter DataFrame to include only selected models
return df[df['Method'].isin(selected_models)]
def format_dataframe(df, dataset):
"""
Format DataFrame for display, removing rows with no data for the selected dataset
"""
# Select relevant columns
columns = ['Method'] + [col for col in df.columns if dataset in col]
filtered_df = df[columns].copy()
# Remove rows where all metric columns are empty/NaN for this dataset
metric_columns = [col for col in filtered_df.columns if col != 'Method']
filtered_df = filtered_df.dropna(subset=metric_columns, how='all')
# Rename columns to remove dataset prefix
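    # e.g., 'STARK-AMAZON_Hit@1' -> 'Hit@1'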
filtered_df.columns = [col.split('_')[-1] if '_' in col else col for col in filtered_df.columns]
# Sort by MRR
if 'MRR' in filtered_df.columns:
filtered_df = filtered_df.sort_values('MRR', ascending=False)
return filtered_df
def update_tables(selected_types):
"""
Update tables based on selected model types.
Include all models from selected categories.
"""
if not selected_types:
return [df.head(0) for df in [df_synthesized_full, df_synthesized_10, df_human_generated]]
filtered_df_full = filter_by_model_type(df_synthesized_full, selected_types)
filtered_df_10 = filter_by_model_type(df_synthesized_10, selected_types)
filtered_df_human = filter_by_model_type(df_human_generated, selected_types)
outputs = []
for df in [filtered_df_full, filtered_df_10, filtered_df_human]:
for dataset in ['AMAZON', 'MAG', 'PRIME']:
outputs.append(format_dataframe(df, f"STARK-{dataset}"))
return outputs
css = """
table > thead {
white-space: normal
}
table {
--cell-width-1: 250px
}
table > tbody > tr > td:nth-child(2) > div {
overflow-x: auto
}
.tab-nav {
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
margin-bottom: 1rem;
}
"""
# Main application
with gr.Blocks(css=css) as demo:
gr.Markdown("# Semi-structured Retrieval Benchmark (STaRK) Leaderboard")
gr.Markdown("Refer to the [STaRK paper](https://arxiv.org/pdf/2404.13207) for details on metrics, tasks and models.")
# Initialize leaderboard at startup
print("Starting leaderboard initialization...")
initialize_leaderboard()
print("Leaderboard initialization finished")
# Model type filter
model_type_filter = gr.CheckboxGroup(
choices=list(model_types.keys()),
value=list(model_types.keys()),
label="Model types",
interactive=True
)
# Initialize dataframes list
all_dfs = []
# Create nested tabs structure
with gr.Tabs() as outer_tabs:
with gr.TabItem("Synthesized (full)"):
with gr.Tabs() as inner_tabs1:
for dataset in ['AMAZON', 'MAG', 'PRIME']:
with gr.TabItem(dataset):
all_dfs.append(gr.DataFrame(interactive=False))
with gr.TabItem("Synthesized (10%)"):
with gr.Tabs() as inner_tabs2:
for dataset in ['AMAZON', 'MAG', 'PRIME']:
with gr.TabItem(dataset):
all_dfs.append(gr.DataFrame(interactive=False))
with gr.TabItem("Human-Generated"):
with gr.Tabs() as inner_tabs3:
for dataset in ['AMAZON', 'MAG', 'PRIME']:
with gr.TabItem(dataset):
all_dfs.append(gr.DataFrame(interactive=False))
# Submission section
gr.Markdown("---")
gr.Markdown("## Submit Your Results")
gr.Markdown("""
Submit your results to be included in the leaderboard. Please ensure your submission meets all requirements.
    For questions, contact stark-qa@cs.stanford.edu. Detailed guidance is available in the [submission instructions](https://docs.google.com/document/d/11coGjTmOEi9p9-PUq1oy0eTOj8f_8CVQhDl5_0FKT14/edit?usp=sharing).
""")
with gr.Row():
with gr.Column():
method_name = gr.Textbox(
label="Method Name (max 25 chars)*",
placeholder="e.g., MyRetrievalModel-v1"
)
dataset = gr.Dropdown(
choices=["amazon", "mag", "prime"],
label="Dataset*",
value="amazon"
)
split = gr.Dropdown(
choices=["test", "test-0.1", "human_generated_eval"],
label="Split*",
value="test"
)
team_name = gr.Textbox(
label="Team Name (max 25 chars)*",
placeholder="e.g., Stanford NLP"
)
contact_email = gr.Textbox(
label="Contact Email(s)*",
placeholder="email@example.com; another@example.com"
)
model_type = gr.Dropdown(
choices=list(model_types.keys()),
label="Model Type*",
value="Others",
info="Select the appropriate category for your model"
)
with gr.Column():
model_description = gr.Textbox(
label="Model Description*",
lines=3,
placeholder="Briefly describe how your retriever model works..."
)
code_repo = gr.Textbox(
label="Code Repository*",
placeholder="https://github.com/snap-stanford/stark-leaderboard"
)
hardware = gr.Textbox(
label="Hardware Specifications*",
placeholder="e.g., 4x NVIDIA A100 80GB"
)
csv_file = gr.File(
label="Prediction CSV*",
file_types=[".csv"],
type="filepath"
)
paper_link = gr.Textbox(
label="Paper Link (Optional)",
placeholder="https://arxiv.org/abs/..."
)
submit_btn = gr.Button("Submit", variant="primary")
result = gr.Textbox(label="Submission Status", interactive=False)
# Set up event handlers
model_type_filter.change(
update_tables,
inputs=[model_type_filter],
outputs=all_dfs
)
# Event handler for submission button
submit_btn.click(
fn=process_submission,
inputs=[
method_name, team_name, dataset, split, contact_email,
code_repo, csv_file, model_description, hardware, paper_link, model_type
],
outputs=result,
api_name="submit"
).success( # Add success handler to update tables
fn=update_tables,
inputs=[model_type_filter],
outputs=all_dfs
)
# Initial table update
demo.load(
update_tables,
inputs=[model_type_filter],
outputs=all_dfs
)
# Launch the application
demo.launch()