# Hugging Face Spaces badge residue (page scrape): "Spaces: Running on Zero".
""" | |
Live monitor of the website statistics and leaderboard. | |
Dependency: | |
sudo apt install pkg-config libicu-dev | |
pip install pytz gradio gdown plotly polyglot pyicu pycld2 tabulate | |
""" | |
import argparse
import ast
import json
import os
import pickle
import threading
import time
from datetime import datetime

import gradio as gr
import numpy as np
import pandas as pd
# def make_leaderboard_md(elo_results):
#     leaderboard_md = f"""
# # π Chatbot Arena Leaderboard
# | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
# This leaderboard is based on the following three benchmarks.
# - [Chatbot Arena](https://lmsys.org/blog/2023-05-03-arena/) - a crowdsourced, randomized battle platform. We use 100K+ user votes to compute Elo ratings.
# - [MT-Bench](https://arxiv.org/abs/2306.05685) - a set of challenging multi-turn questions. We use GPT-4 to grade the model responses.
# - [MMLU](https://arxiv.org/abs/2009.03300) (5-shot) - a test to measure a model's multitask accuracy on 57 tasks.
# π» Code: The Arena Elo ratings are computed by this [notebook]({notebook_url}). The MT-bench scores (single-answer grading on a scale of 10) are computed by [fastchat.llm_judge](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge). The MMLU scores are mostly computed by [InstructEval](https://github.com/declare-lab/instruct-eval). Higher values are better for all benchmarks. Empty cells mean not available. Last updated: November, 2023.
# """
#     return leaderboard_md
def make_leaderboard_md():
    """Return the markdown heading for the text-to-image leaderboard tab."""
    # Plain string literal: the previous f-string had no placeholders (ruff F541).
    # NOTE(review): "π" looks like a mis-encoded emoji (likely a trophy) — confirm
    # against the deployed page before changing it.
    leaderboard_md = """
# π K-Sort-Arena Leaderboard (Text-to-Image)
"""
    return leaderboard_md
def make_leaderboard_video_md():
    """Return the markdown heading for the text-to-video leaderboard tab."""
    # Plain string literal: the previous f-string had no placeholders (ruff F541).
    # NOTE(review): "π" looks like a mis-encoded emoji — confirm against the page.
    leaderboard_md = """
# π K-Sort-Arena Leaderboard (Text-to-Video)
"""
    return leaderboard_md
def model_hyperlink(model_name, link):
    """Wrap ``model_name`` in an HTML anchor that opens ``link`` in a new tab."""
    # The dotted-underline styling matches the rest of the leaderboard links.
    anchor_style = "color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"
    return f'<a target="_blank" href="{link}" style="{anchor_style}">{model_name}</a>'
def make_arena_leaderboard_md(total_models, total_votes, last_updated):
    """Return the arena statistics markdown shown above the score table.

    Args:
        total_models: number of models currently on the leaderboard.
        total_votes: number of K-wise votes; each is counted as equivalent
            to six one-on-one (pairwise) votes, hence ``total_votes * 6``.
        last_updated: human-readable timestamp of the last refresh
            (pre-formatted by the caller).
    """
    # Removed commented-out datetime code: the caller supplies last_updated.
    leaderboard_md = f"""
Total #models: **{total_models}** (anonymous). Total #votes: **{total_votes}** (Equivalent to **{total_votes*6}** votes for one-on-one games).
\n Last updated: {last_updated}.
"""
    return leaderboard_md
def make_disclaimer_md():
    """Return the disclaimer HTML: a hidden modal, its overlay, and the trigger link.

    The modal/overlay are toggled purely client-side via inline onclick handlers.
    """
    # Fix: "Purpose and Scope" had a duplicated closing tag (</b></b>).
    # Also a plain string now: there was nothing to interpolate (ruff F541), and
    # an f-string would break if literal CSS/JS braces were ever added here.
    disclaimer_md = '''
<div id="modal" style="display:none; position:fixed; top:50%; left:50%; transform:translate(-50%, -50%); padding:20px; background:white; box-shadow:0 0 10px rgba(0,0,0,0.5); z-index:1000;">
    <p style="font-size:24px;"><strong>Disclaimer</strong></p>
    <p style="font-size:18px;"><b>Purpose and Scope</b></p>
    <p><b>This platform is designed for academic use, providing a space for evaluating and comparing Visual Generation Models. The information and services provided are intended for research and educational purposes only.</b></p>
    <p style="font-size:18px;"><b>Privacy and Data Protection</b></p>
    <p><b>While users may voluntarily submit their names and institutional affiliations, this information is not required and is collected solely for the purpose of academic recognition. Personal information submitted to this platform will be handled with care and used solely for the intended academic purposes. We are committed to protecting your privacy, and we will not share personal data with third parties without explicit consent.</b></p>
    <p style="font-size:18px;"><b>Source of Models</b></p>
    <p><b>All models evaluated and displayed on this platform are obtained from official sources, including but not limited to official repositories and Replicate.</b></p>
    <p style="font-size:18px;"><b>Limitations of Liability</b></p>
    <p><b>The platform and its administrators do not assume any legal liability for the use or interpretation of the information provided. The evaluations and comparisons are for academic purposes. Users should verify the information independently and must not use the platform for any illegal, harmful, violent, racist, or sexual purposes.</b></p>
    <p style="font-size:18px;"><b>Modification of Terms</b></p>
    <p><b>We reserve the right to modify these terms at any time. Users will be notified of significant changes through updates on the platform.</b></p>
    <p style="font-size:18px;"><b>Contact Information</b></p>
    <p><b>For any questions or to report issues, please contact us at info@ksort.org.</b></p>
</div>
<div id="overlay" style="display:none; position:fixed; top:0; left:0; width:100%; height:100%; background:rgba(0,0,0,0.5); z-index:999;" onclick="document.getElementById('modal').style.display='none'; document.getElementById('overlay').style.display='none'"></div>
<p> β οΈ This platform is designed for academic usage, for details please refer to <a href="#" id="open_link" onclick="document.getElementById('modal').style.display='block'; document.getElementById('overlay').style.display='block'">disclaimer</a>.</p>
'''
    return disclaimer_md
def make_arena_leaderboard_data(results):
    """Convert the sorted score list into a DataFrame for ``gr.Dataframe``.

    Args:
        results: the ``sorted_score_list`` payload from the score JSON file —
            presumably a list of per-model records; confirm against the writer.
    """
    # pandas is already imported at module level as ``pd``; the redundant
    # function-local import was removed.
    return pd.DataFrame(results)
def build_leaderboard_tab(score_result_file='sorted_score_list.json'):
    """Render the text-to-image leaderboard UI from a precomputed score file.

    Args:
        score_result_file: path to a JSON file with keys ``sorted_score_list``,
            ``total_models``, ``total_votes`` and ``last_updated``.
    """
    with open(score_result_file, "r") as json_file:
        data = json.load(json_file)
    score_results = data["sorted_score_list"]
    total_models = data["total_models"]
    total_votes = data["total_votes"]
    last_updated = data["last_updated"]

    gr.Markdown(make_leaderboard_md(), elem_id="leaderboard_markdown")
    # NOTE(review): the callable itself is passed; gradio calls it to obtain the
    # HTML value, so this behaves like gr.HTML(make_disclaimer_md()) — confirm.
    gr.HTML(make_disclaimer_md)
    with gr.Tab("Arena Score", id=0):
        gr.Markdown(
            make_arena_leaderboard_md(total_models, total_votes, last_updated),
            elem_id="leaderboard_markdown",
        )
        gr.Dataframe(make_arena_leaderboard_data(score_results))
        gr.Markdown(
            """ ## The leaderboard is updated frequently and continues to incorporate new models.
""",
            elem_id="leaderboard_markdown",
        )
    # Local import — presumably deferred to avoid a circular import; the unused
    # ``html_code`` name was dropped from the import list.
    from .utils import acknowledgment_md
    with gr.Blocks():
        gr.Markdown(acknowledgment_md)
def build_leaderboard_video_tab(score_result_file='sorted_score_list_video.json'):
    """Render the text-to-video leaderboard UI from a precomputed score file.

    Args:
        score_result_file: path to a JSON file with keys ``sorted_score_list``,
            ``total_models``, ``total_votes`` and ``last_updated``.
    """
    with open(score_result_file, "r") as json_file:
        data = json.load(json_file)
    score_results = data["sorted_score_list"]
    total_models = data["total_models"]
    total_votes = data["total_votes"]
    last_updated = data["last_updated"]

    gr.Markdown(make_leaderboard_video_md(), elem_id="leaderboard_markdown")
    # NOTE(review): the callable itself is passed; gradio calls it to obtain the
    # HTML value, so this behaves like gr.HTML(make_disclaimer_md()) — confirm.
    gr.HTML(make_disclaimer_md)
    with gr.Tab("Arena Score", id=0):
        gr.Markdown(
            make_arena_leaderboard_md(total_models, total_votes, last_updated),
            elem_id="leaderboard_markdown",
        )
        gr.Dataframe(make_arena_leaderboard_data(score_results))
        gr.Markdown(
            """ ## The leaderboard is updated frequently and continues to incorporate new models.
""",
            elem_id="leaderboard_markdown",
        )
    # Local import — presumably deferred to avoid a circular import; the unused
    # ``html_code`` name was dropped from the import list.
    from .utils import acknowledgment_md
    with gr.Blocks():
        gr.Markdown(acknowledgment_md)