yuhan Zhang committed on
Commit f216956 · 1 Parent(s): de9cce7

upload leaderboard

app.py ADDED
@@ -0,0 +1,62 @@
+ import gradio as gr
+ import os
+ import json
+ from pathlib import Path
+
+ from serve.css import block_css
+ from serve.markdown import *
+ from serve.leaderboard import build_leaderboard_tab
+ from serve.model import model_config
+ # SERVER_PORT, ROOT_PATH, ELO_RESULTS_DIR
+ SERVER_PORT = 7860
+ LEADERBOARD_DIR = "leaderboards"
+
+ def build_combine_demo():
+     with gr.Blocks(
+         title="",
+         theme=gr.themes.Default(),
+         css=block_css,
+     ) as demo:
+         gr.Markdown(get_title_md(), elem_id="title_markdown")
+         gr.Markdown(get_intro_md(), elem_id="intro_markdown")
+
+         gr.Markdown("Models", elem_id="subtitle_markdown")
+         gr.Markdown(get_model_intro_md(), elem_id="intro_markdown")
+         with gr.Accordion("🔍 Expand to see detailed generative models involved", open=False):
+             gr.Markdown(get_model_description_md(model_config), elem_id="model_description_markdown")
+
+         gr.Markdown("Criteria", elem_id="subtitle_markdown")
+         gr.Markdown(get_object_dimension_intro_md(), elem_id="intro_markdown")
+         with gr.Accordion("🔍 Expand to see detailed evaluation dimensions", open=False):
+             gr.Markdown(get_object_dimension_description_md(), elem_id="evaldim_markdown")
+
+         gr.Markdown("Leaderboard", elem_id="subtitle_markdown")
+         # gr.Markdown(get_leaderboard_intro_md(), elem_id="intro_markdown")
+         with gr.Tab("Hi3DEval"):
+             gr.Markdown(get_hi3deval_intro_md(), elem_id="intro_markdown")
+             hi3deval_leaderboard_file = gr.Textbox(value=os.path.join(LEADERBOARD_DIR, "object_hi3deval.csv"), visible=False)
+             hi3deval_task_dropdown = gr.Dropdown(
+                 label="Choose Task",
+                 choices=["Full Leaderboard", "Text-to-3D only", "Image-to-3D only"],
+                 value="Full Leaderboard",  # default value
+                 interactive=True,          # user-selectable
+                 multiselect=False          # single choice only
+             )
+             hi3deval_leaderboard_table = build_leaderboard_tab(hi3deval_leaderboard_file)
+             gr.Markdown(get_citation_md("hi3deval"), elem_id="ack_markdown")
+
+         # with gr.Tab("Image-to-3D Generation", id=1):
+         #     build_leaderboard_tab()
+
+         hi3deval_task_dropdown.change(
+             build_leaderboard_tab,
+             [hi3deval_leaderboard_file, hi3deval_task_dropdown],
+             hi3deval_leaderboard_table
+         )
+     return demo
+
+
+ if __name__ == "__main__":
+     demo = build_combine_demo()
+     # demo.queue(max_size=20).launch(server_port=server_port, root_path=ROOT_PATH, debug=True)
+     demo.queue(max_size=20).launch(server_port=SERVER_PORT, debug=True)
leaderboards/object_hi3deval.csv ADDED
@@ -0,0 +1,22 @@
+ Method,Task,Geometry Plausibility,Geometry Details,Texture Quality,Geo.-Tex. Coherence,Prompt-3D Alignment,Overall
+ hunyuan3d-2.0,"Image-to-3D",6.2919,2.7215,2.7644,0.9876,3.4334,16.1988
+ trellis,"Image-to-3D",5.8626,2.392,2.4693,0.9702,3.5048,15.1989
+ spar3d,"Image-to-3D",5.7791,2.3031,2.4749,0.9601,3.4842,15.0014
+ triposr,"Image-to-3D",5.2216,2.4225,2.3758,0.9562,3.3643,14.3404
+ instantmesh,"Image-to-3D",5.4242,2.2252,2.3063,0.9587,3.363,14.2775
+ crm,"Image-to-3D",4.745,2.2991,2.3777,0.9164,3.219,13.5572
+ mvdream,"Text-to-3D",4.4064,2.742,2.8116,0.951,2.5879,13.4989
+ unique3d,"Image-to-3D",4.9288,2.3233,1.9627,0.776,3.1989,13.1897
+ openlrm,"Image-to-3D",3.7754,2.2614,2.0922,0.902,2.2298,11.2608
+ wonder3d,"Image-to-3D",3.7879,2.0092,1.9658,0.9255,2.0874,10.7758
+ sz123,"Image-to-3D",3.6052,1.6548,2.0293,0.8902,2.2578,10.4374
+ magic123,"Image-to-3D",3.4617,1.74,2.0094,0.898,2.2171,10.3262
+ grm-i,"Image-to-3D",3.2932,1.857,1.8885,0.849,2.0735,9.9612
+ lgm,"Image-to-3D",3.2148,1.6733,1.8891,0.8118,2.0304,9.6193
+ lucid-dreamer,"Text-to-3D",2.9346,1.5891,2.1069,0.8333,2.0297,9.4936
+ grm-t,"Text-to-3D",3.0096,1.6627,1.7389,0.898,1.793,9.1023
+ latent-nerf,"Text-to-3D",2.7265,1.7067,1.7065,0.8412,1.7688,8.7497
+ magic3d,"Text-to-3D",2.9015,1.6239,1.5395,0.9431,1.5618,8.5698
+ syncdreamer,"Image-to-3D",2.9423,1.5323,1.2134,0.8529,1.2776,7.8185
+ dreamfusion,"Text-to-3D",2.669,1.2525,1.183,0.9137,1.3446,7.3627
+ triplane-gaussian,"Image-to-3D",2.2948,1.1859,1.2028,0.6647,1.2908,6.6389
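
For reference, the table above can be inspected directly with pandas. The sketch below (an assumption-light example, not part of the commit) mirrors the task filtering that `serve/leaderboard.py` applies when a task is picked in the app.

```python
# Minimal sketch: load the uploaded leaderboard CSV and filter it by task,
# the same way build_leaderboard_tab() narrows the table for the
# "Text-to-3D only" / "Image-to-3D only" dropdown choices.
import pandas as pd

df = pd.read_csv("leaderboards/object_hi3deval.csv")

task = "Text-to-3D only"  # or "Image-to-3D only" / "Full Leaderboard"
if task in ["Text-to-3D only", "Image-to-3D only"]:
    df = df[df["Task"] == task.split()[0]]

# Rows in the CSV are already sorted by the Overall column (descending).
print(df[["Method", "Task", "Overall"]].to_string(index=False))
```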
requirements.txt ADDED
@@ -0,0 +1,70 @@
+ aiofiles==23.2.1
+ altair==5.5.0
+ annotated-types==0.7.0
+ anyio==4.9.0
+ attrs==25.3.0
+ Brotli==1.1.0
+ certifi==2025.7.14
+ charset-normalizer==3.4.2
+ click==8.2.1
+ contourpy==1.3.2
+ cycler==0.12.1
+ exceptiongroup==1.3.0
+ fastapi==0.116.1
+ ffmpy==0.6.1
+ filelock==3.18.0
+ fonttools==4.59.0
+ fsspec==2025.7.0
+ gradio==4.44.1
+ gradio_client==1.3.0
+ groovy==0.1.2
+ h11==0.16.0
+ hf-xet==1.1.5
+ httpcore==1.0.9
+ httpx==0.28.1
+ huggingface-hub==0.34.1
+ idna==3.10
+ importlib_resources==6.5.2
+ Jinja2==3.1.6
+ jsonschema==4.25.0
+ jsonschema-specifications==2025.4.1
+ kiwisolver==1.4.8
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ matplotlib==3.10.3
+ mdurl==0.1.2
+ narwhals==1.48.1
+ numpy==1.26.0
+ orjson==3.11.1
+ packaging==25.0
+ pandas==2.3.1
+ pillow==10.4.0
+ pydantic==2.11.7
+ pydantic_core==2.33.2
+ pydub==0.25.1
+ Pygments==2.19.2
+ pyparsing==3.2.3
+ python-dateutil==2.9.0.post0
+ python-multipart==0.0.20
+ pytz==2025.2
+ PyYAML==6.0.2
+ referencing==0.36.2
+ requests==2.32.4
+ rich==14.1.0
+ rpds-py==0.26.0
+ ruff==0.12.5
+ safehttpx==0.1.6
+ semantic-version==2.10.0
+ shellingham==1.5.4
+ six==1.17.0
+ sniffio==1.3.1
+ starlette==0.47.2
+ tomlkit==0.12.0
+ tqdm==4.67.1
+ typer==0.16.0
+ typing-inspection==0.4.1
+ typing_extensions==4.14.1
+ tzdata==2025.2
+ urllib3==2.5.0
+ uvicorn==0.35.0
+ websockets==11.0.3
serve/__pycache__/css.cpython-310.pyc ADDED
Binary file (1.42 kB)
serve/__pycache__/leaderboard.cpython-310.pyc ADDED
Binary file (3.2 kB)
serve/__pycache__/leaderboard.cpython-39.pyc ADDED
Binary file (3.33 kB)
serve/__pycache__/markdown.cpython-310.pyc ADDED
Binary file (5.28 kB)
serve/__pycache__/markdown.cpython-39.pyc ADDED
Binary file (674 Bytes)
serve/__pycache__/model.cpython-310.pyc ADDED
Binary file (5.39 kB)
serve/__pycache__/model.cpython-39.pyc ADDED
Binary file (3.07 kB)
serve/css.py ADDED
@@ -0,0 +1,66 @@
+ block_css = """
+ #title_markdown * {
+     font-size: 125% !important;
+     font-weight: bold;
+     text-align: center;
+     margin-bottom: 12px;
+ }
+ #subtitle_markdown * {
+     font-size: 110% !important;
+     font-weight: bold;
+     text-align: left;
+     margin-bottom: 2px;
+ }
+ #intro_markdown * {
+     font-size: 102% !important;
+     line-height: 1.2;
+ }
+ #model_description_markdown * {
+     font-size: 102% !important;
+ }
+ #model_description_markdown th {
+     display: none;
+ }
+ #model_description_markdown tr {
+     border: none !important;
+ }
+ #model_description_markdown table,
+ #model_description_markdown td {
+     font-size: 100% !important;
+     border: none !important;
+     outline: none !important;
+     box-shadow: none !important;
+     background: none !important;
+     padding-top: 6px;
+     padding-bottom: 6px;
+ }
+ #evaldim_markdown * {
+     font-size: 102% !important;
+     line-height: 1.4;
+ }
+ #about_markdown {
+     font-size: 110%
+ }
+ #ack_markdown {
+     font-size: 100%
+ }
+
+ #leaderboard_markdown {
+     font-size: 110%
+ }
+ #leaderboard_markdown td {
+     padding-top: 6px;
+     padding-bottom: 6px;
+ }
+ #leaderboard_dataframe td {
+     line-height: 0.1em;
+     font-weight: bold;
+ }
+ #input_box textarea {
+     font-weight: bold;
+     font-size: 125%;
+ }
+ footer {
+     display: none !important
+ }
+ """
serve/leaderboard.py ADDED
@@ -0,0 +1,96 @@
+ import argparse
+ import ast
+ import pickle
+ import os
+ import threading
+ import time
+
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+ from serve.model import model_config
+
+ def hyperlink(name, link):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); \
+         text-decoration: underline;text-decoration-style: dotted;">{name}</a>'
+
+ def amend_model_name(name, rank):
+     model_name = model_config[name].model_name
+     if rank == 1:
+         return "🥇 " + model_name
+     elif rank == 2:
+         return "🥈 " + model_name
+     elif rank == 3:
+         return "🥉 " + model_name
+     else:
+         return model_name
+
+ def get_cfg_info(name):
+     config = model_config[name]
+     links = []
+     if config.page_link:
+         links.append(hyperlink("Page", config.page_link))
+     if config.code_link:
+         links.append(hyperlink("Code", config.code_link))
+     return ", ".join(links) if links else "N/A", config.organization if config.organization else "N/A"
+
+ def get_leaderboard_values(leaderboard_df):
+     leaderboard_vals = []
+     for i, row in leaderboard_df.iterrows():
+         rank = i + 1
+         model_name = row["Method"]
+         task = row["Task"]
+         if model_name not in model_config.keys() or model_config[model_name].task != task:
+             continue
+
+         values = [rank, amend_model_name(model_name, rank), task]
+         values = values + [row.get(dim, np.NaN) for dim in leaderboard_df.columns[2:]]
+         # values.append(round(np.sum([v for v in values[3:] if pd.notna(v)]), 4))
+
+         links, organization = get_cfg_info(model_name)
+         # values.append(links)
+         values.append(organization)
+
+         leaderboard_vals.append(values)
+     return leaderboard_vals
+
+ def get_topk_ranks(df, k=3):
+     ranks = {}
+     for col_idx, col in enumerate(df.columns[2:]):  # skip the "Method" and "Task" columns
+         topk = df[col].nlargest(k)
+         for rank, idx in enumerate(topk.index):
+             if idx not in ranks:
+                 ranks[idx] = {}
+             ranks[idx][col_idx] = rank + 1  # 1-based rank
+     for i in range(k): ranks.setdefault(i, {})[5] = i + 1  # rows are pre-sorted by Overall (column index 5)
+     return ranks  # dict: row -> {col: rank}
+
+ def build_leaderboard_tab(leaderboard_file: str, task: str = ""):
+     if not isinstance(leaderboard_file, str):
+         leaderboard_file = leaderboard_file.value
+     if not isinstance(task, str):
+         task = task.value
+
+     df = pd.read_csv(leaderboard_file)
+     if task in ["Text-to-3D only", "Image-to-3D only"]:
+         df = df[df["Task"] == task.split()[0]]
+         # df = df.drop(df[df["Task"]!=task.split()[0]].index)
+     leaderboard_df = df.drop(df[df["Method"].isnull()].index)
+     leaderboard_df = leaderboard_df.reset_index(drop=True)
+
+     leaderboard_vals = get_leaderboard_values(leaderboard_df)
+     leaderboard = gr.Dataframe(
+         headers = ['Rank', "🤖 Model", "🪧 Task"]
+             + [f"{dim}" for dim in leaderboard_df.keys()[2:-1]]
+             + ["⭐ Overall", "🏛️ Organization"],  # "🔗 Links",
+         datatype = ["number", "str", "str"]
+             + ["number"] * (len(leaderboard_df.columns) - 3)
+             + ["number", "str"],
+         value = leaderboard_vals,
+         height = 680,
+         column_widths = [60, 140, 100]
+             + [120] * (len(leaderboard_df.columns) - 3)
+             + [120, 160],
+         wrap = True,
+     )
+     return leaderboard
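
As a usage note, `build_leaderboard_tab` instantiates a `gr.Dataframe`, so it must run inside a `gr.Blocks` context (as `app.py` does). A minimal standalone sketch, assuming the CSV shipped in this commit:

```python
# Sketch: render just the leaderboard table, outside the full demo.
import gradio as gr
from serve.leaderboard import build_leaderboard_tab

with gr.Blocks() as demo:
    # Plain strings are accepted; the function only unwraps .value when it is
    # handed gr.Textbox / gr.Dropdown components instead of strings.
    build_leaderboard_tab("leaderboards/object_hi3deval.csv", "Image-to-3D only")

if __name__ == "__main__":
    demo.launch()
```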
serve/markdown.py ADDED
@@ -0,0 +1,125 @@
+
+ def hyperlink(name, link):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); \
+         text-decoration: underline;text-decoration-style: dotted;">{name}</a>'
+
+ def get_title_md():
+     md = f'''
+ # 🏆 Leaderboard for 3D Generative Models
+ '''
+     return md
+
+ def get_intro_md():
+     md = '''
+ This leaderboard provides a centralized platform for evaluating and tracking the performance of 3D generation models.
+ '''
+     return md
+
+ def get_model_intro_md():
+     md = '''
+ This leaderboard spans a diverse set of state-of-the-art 3D generation models, covering different conditioning settings such as images, text, or combinations thereof.
+ '''
+     return md
+
+ def get_model_description_md(model_config, cols=10, except_models=[]):
+     model_list = {}
+     for cfg in model_config.values():
+         task = cfg.task
+         model_name = cfg.model_name
+         model_link = cfg.page_link if cfg.page_link else cfg.code_link
+         if task not in model_list.keys():
+             model_list[task] = set()
+         if model_name not in except_models:
+             model_list[task].add(hyperlink(model_name, model_link))
+
+     model_descriptions = ""
+     for task, models in model_list.items():
+         model_descriptions += f"\n**{len(models)} {task} Generative Models**\n"
+         ## model_table
+
+         model_descriptions += '<table style="width:100%; text-align:left; border:none; border-collapse: collapse;">\n'
+         for i, model in enumerate(models):
+             if i % cols == 0:
+                 model_descriptions += ' <tr>\n'
+             model_descriptions += f' <td>{model}</td>\n'
+             if (i + 1) % cols == 0:
+                 model_descriptions += ' </tr>\n'
+         if len(models) % cols != 0:
+             num_pad = cols - len(models) % cols
+             model_descriptions += ' <td></td>\n' * num_pad
+             model_descriptions += ' </tr>\n'
+         model_descriptions += '</table>\n'
+     return model_descriptions.strip()
+
+ def get_object_dimension_intro_md():
+     md = f'''
+ Each model is evaluated under consistent, standardized settings and assessed along **multiple evaluation dimensions** to provide a detailed view of its strengths and limitations:
+ '''
+     return md
+
+ def get_object_dimension_description_md():
+     md = f'''
+ - **Geometry Plausibility** assesses the structural integrity and physical feasibility of the generated shape.
+ - **Geometry Details** reflects the fidelity of fine-scale structures, such as sharp edges and part boundaries.
+ - **Texture Quality** evaluates the visual fidelity of surface textures in terms of resolution, realism, and aesthetic consistency.
+ - **Geometry-Texture Coherency** assesses the alignment between texture and shape: whether textures follow the contours, part boundaries, and material semantics of the geometry.
+ - **Prompt-3D Alignment** evaluates the semantic and/or identity consistency between the input prompt and the generated 3D asset.
+ '''
+     return md
+
+ def get_leaderboard_intro_md():
+     md = '''
+ This leaderboard integrates results from three complementary benchmarks that span different aspects of 3D synthesis.
+ - [Hi3DEval]()
+ - [3DGenBench](https://zyh482.github.io/3DGen-Bench/)
+ - [GPTEval3D](https://github.com/3DTopia/GPTEval3D)
+ '''
+     return md
+
+
+ def get_hi3deval_intro_md(num_model=None):
+     md = f'''
+ This leaderboard is evaluated using **Hi3DEval**, a straightforward scoring benchmark that does **not rely on pairwise comparisons**.
+
+ Specifically, each dimension is assigned an absolute score within clearly defined value ranges:
+
+ - Geometry Plausibility: range [0, 9]
+ - Geometry Details: range [0, 4]
+ - Texture Quality: range [0, 4]
+ - Geometry-Texture Coherency: range [0, 1]
+ - Prompt-3D Alignment: range [0, 4]
+
+ The **Overall Score** is computed as the **SUM** of the scores across all five dimensions.
+
+ Hi3DEval supports unified evaluation for both **Text-to-3D** and **Image-to-3D** generation tasks. You can also freely select **"Task"** to explore performance under different input modalities.
+ '''
+     return md
+
+ def get_citation_md(name):
+     citations = {
+         "hi3deval": '''
+ ```bibtex
+ @article
+ ''',
+         "3dgen-bench": '''
+ ```bibtex
+ @article{zhang20253dgen,
+     title={3DGen-Bench: Comprehensive Benchmark Suite for 3D Generative Models},
+     author={Zhang, Yuhan and Zhang, Mengchen and Wu, Tong and Wang, Tengfei and Wetzstein, Gordon and Lin, Dahua and Liu, Ziwei},
+     journal={arXiv preprint arXiv:2503.21745},
+     year={2025}
+ }
+ ''',
+         "gpteval3d": '''
+ ```bibtex
+ @inproceedings{wu2024gpt,
+     title={Gpt-4v (ision) is a human-aligned evaluator for text-to-3d generation},
+     author={Wu, Tong and Yang, Guandao and Li, Zhibing and Zhang, Kai and Liu, Ziwei and Guibas, Leonidas and Lin, Dahua and Wetzstein, Gordon},
+     booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
+     pages={22227--22238},
+     year={2024}
+ }
+ '''
+     }
+     md = f"Reference:\n{citations[name.lower()]}"
+     return md
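
The Hi3DEval intro above states that the Overall Score is the plain sum of the five per-dimension scores. A quick sketch (assuming pandas and the CSV added in this commit) checks that rule against every row, e.g. hunyuan3d-2.0: 6.2919 + 2.7215 + 2.7644 + 0.9876 + 3.4334 = 16.1988.

```python
# Sketch: verify Overall == sum of the five dimension scores described in
# get_hi3deval_intro_md(), up to the rounding used in the CSV.
import pandas as pd

df = pd.read_csv("leaderboards/object_hi3deval.csv")
dims = ["Geometry Plausibility", "Geometry Details", "Texture Quality",
        "Geo.-Tex. Coherence", "Prompt-3D Alignment"]

assert (df[dims].sum(axis=1) - df["Overall"]).abs().max() < 1e-3
```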
serve/model.py ADDED
@@ -0,0 +1,353 @@
+ from collections import namedtuple
+ from typing import List
+
+ ModelConfig = namedtuple("ModelConfig", ["model_name", "task", "representation", "paradigm", "page_link", "code_link", "organization"])
+ model_config = {}
+
+ def register_model_config(
+     nick_name: str, model_name: str, task: str, representation: str, paradigm: str = None, page_link: str = None, code_link: str = None, organization: str = None
+ ):
+     config = ModelConfig(model_name, task, representation, paradigm, page_link, code_link, organization)
+     model_config[nick_name] = config
+
+ def get_model_config(model_name: str) -> ModelConfig:
+     assert model_name in model_config
+     return model_config[model_name]
+
+
+ ### Registering model configurations for Text-to-3D models
+
+ register_model_config(
+     nick_name="dreamfusion",
+     model_name="DreamFusion",
+     task="Text-to-3D",
+     representation="NeRF",
+     paradigm="Optimization",
+     page_link="https://dreamfusion3d.github.io/",
+     code_link="",
+     organization="Google Research"
+ )
+
+ register_model_config(
+     nick_name="sjc",
+     model_name="SJC",
+     task="Text-to-3D",
+     representation="Voxel NeRF",
+     paradigm="Optimization",
+     page_link="https://pals.ttic.edu/p/score-jacobian-chaining",
+     code_link="https://github.com/pals-ttic/sjc",
+     organization="TTI-Chicago"
+ )
+
+ register_model_config(
+     nick_name="latent-nerf",
+     model_name="Latent-NeRF",
+     task="Text-to-3D",
+     representation="NeRF",
+     paradigm="Optimization",
+     page_link="",
+     code_link="https://github.com/eladrich/latent-nerf",
+     organization="Tel Aviv University"
+ )
+
+ register_model_config(
+     nick_name="magic3d",
+     model_name="Magic3D",
+     task="Text-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://research.nvidia.com/labs/dir/magic3d/",
+     code_link="",
+     organization="Nvidia"
+ )
+
+ register_model_config(
+     nick_name="lucid-dreamer",
+     model_name="LucidDreamer",
+     task="Text-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://luciddreamer-cvlab.github.io/",
+     code_link="https://github.com/luciddreamer-cvlab/LucidDreamer",
+     organization="Computer Vision Lab, Seoul National University"
+ )
+
+ register_model_config(
+     nick_name="mvdream",
+     model_name="MVDream",
+     task="Text-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://mv-dream.github.io/",
+     code_link="https://github.com/bytedance/MVDream",
+     organization="ByteDance"
+ )
+
+ register_model_config(
+     nick_name="grm-t",
+     model_name="GRM",
+     task="Text-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://justimyhxu.github.io/projects/grm/",
+     code_link="https://github.com/justimyhxu/grm",
+     organization="Stanford University"
+ )
+
+ register_model_config(
+     nick_name="point-e-t",
+     model_name="Point-E",
+     task="Text-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://openai.com/index/point-e/",
+     code_link="https://github.com/openai/point-e",
+     organization="OpenAI"
+ )
+
+ register_model_config(
+     nick_name="shap-e-t",
+     model_name="Shap-E",
+     task="Text-to-3D",
+     representation="",
+     paradigm="",
+     page_link="",
+     code_link="https://github.com/openai/shap-e",
+     organization="OpenAI"
+ )
+
+
+ ## Registering model configurations for Image-to-3D models
+ register_model_config(
+     nick_name="trellis",
+     model_name="TRELLIS",
+     task="Image-to-3D",
+     representation="",
+     paradigm="Naive 3DGen",
+     page_link="https://microsoft.github.io/TRELLIS/",
+     code_link="https://github.com/Microsoft/TRELLIS",
+     organization="Microsoft Research"
+ )
+ register_model_config(
+     nick_name="hunyuan3d-2.0",
+     model_name="Hunyuan3D 2.0",
+     task="Image-to-3D",
+     representation="Mesh",
+     paradigm="Naive 3DGen",
+     page_link="https://3d-models.hunyuan.tencent.com/",
+     code_link="https://github.com/Tencent-Hunyuan/Hunyuan3D-2",
+     organization="Tencent Hunyuan3D Team"
+ )
+ register_model_config(
+     nick_name="hunyuan3d-2.5",
+     model_name="Hunyuan3D 2.5",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="",
+     code_link="",
+     organization="Tencent Hunyuan3D Team"
+ )
+ register_model_config(
+     nick_name="spar3d",
+     model_name="SPAR3D",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://spar3d.github.io/",
+     code_link="https://github.com/Stability-AI/stable-point-aware-3d",
+     organization="Stability AI"
+ )
+ register_model_config(
+     nick_name="instantmesh",
+     model_name="InstantMesh",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="",
+     code_link="https://github.com/TencentARC/InstantMesh",
+     organization="ARC Lab, Tencent PCG"
+ )
+ register_model_config(
+     nick_name="triposr",
+     model_name="TripoSR",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="",
+     code_link="https://github.com/VAST-AI-Research/TripoSR",
+     organization="Tripo AI, Stability AI"
+ )
+ register_model_config(
+     nick_name="unique3d",
+     model_name="Unique3D",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://wukailu.github.io/Unique3D/",
+     code_link="https://github.com/AiuniAI/Unique3D",
+     organization="Tsinghua University"
+ )
+ register_model_config(
+     nick_name="crm",
+     model_name="CRM",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://ml.cs.tsinghua.edu.cn/~zhengyi/CRM/",
+     code_link="https://github.com/thu-ml/CRM",
+     organization="Tsinghua University"
+ )
+ register_model_config(
+     nick_name="ln3diff",
+     model_name="LN3Diff",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://nirvanalan.github.io/projects/ln3diff/",
+     code_link="https://github.com/NIRVANALAN/LN3Diff",
+     organization="S-Lab, Nanyang Technological University"
+ )
+ register_model_config(
+     nick_name="wonder3d",
+     model_name="Wonder3D",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://www.xxlong.site/Wonder3D/",
+     code_link="https://github.com/xxlong0/Wonder3D",
+     organization="The University of Hong Kong"
+ )
+ register_model_config(
+     nick_name="openlrm",
+     model_name="OpenLRM",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="",
+     code_link="https://github.com/3DTopia/OpenLRM",
+     organization="Shanghai AI Lab"
+ )
+ register_model_config(
+     nick_name="sz123",
+     model_name="Stable Zero123",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://stability.ai/stable-3d",
+     code_link="https://huggingface.co/stabilityai/stable-zero123",
+     organization="Stability AI"
+ )
+ register_model_config(
+     nick_name="z123",
+     model_name="Zero-1-to-3 XL",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://zero123.cs.columbia.edu/",
+     code_link="https://github.com/cvlab-columbia/zero123",
+     organization="Columbia University"
+ )
+ register_model_config(
+     nick_name="magic123",
+     model_name="Magic123",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://guochengqian.github.io/project/magic123/",
+     code_link="https://github.com/guochengqian/Magic123",
+     organization="KAUST"
+ )
+ register_model_config(
+     nick_name="lgm",
+     model_name="LGM",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://me.kiui.moe/lgm/",
+     code_link="https://github.com/3DTopia/LGM",
+     organization="Peking University"
+ )
+ register_model_config(
+     nick_name="grm-i",
+     model_name="GRM",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://justimyhxu.github.io/projects/grm/",
+     code_link="https://github.com/justimyhxu/grm",
+     organization="Stanford University"
+ )
+ register_model_config(
+     nick_name="syncdreamer",
+     model_name="SyncDreamer",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://liuyuan-pal.github.io/SyncDreamer/",
+     code_link="https://github.com/liuyuan-pal/SyncDreamer",
+     organization="The University of Hong Kong, Tencent Games"
+ )
+ register_model_config(
+     nick_name="shap-e-i",
+     model_name="Shap-E",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="",
+     code_link="https://github.com/openai/shap-e",
+     organization="OpenAI"
+ )
+ register_model_config(
+     nick_name="point-e-i",
+     model_name="Point-E",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://openai.com/index/point-e/",
+     code_link="https://github.com/openai/point-e",
+     organization="OpenAI"
+ )
+ register_model_config(
+     nick_name="escher-net",
+     model_name="EscherNet",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://kxhit.github.io/EscherNet",
+     code_link="https://github.com/kxhit/EscherNet",
+     organization="Dyson Robotics Lab, Imperial College London"
+ )
+ register_model_config(
+     nick_name="free3d",
+     model_name="Free3D",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://chuanxiaz.com/free3d/",
+     code_link="https://github.com/lyndonzheng/Free3D",
+     organization="Visual Geometry Group, University of Oxford"
+ )
+
+ register_model_config(
+     nick_name="triplane-gaussian",
+     model_name="TriplaneGaussian",
+     task="Image-to-3D",
+     representation="",
+     paradigm="",
+     page_link="https://zouzx.github.io/TriplaneGaussian/",
+     code_link="https://github.com/VAST-AI-Research/TriplaneGaussian",
+     organization="BNRist, Tsinghua University, VAST"
+ )
+
+ # register_model_config(
+ #     nick_name="",
+ #     model_name="",
+ #     task="",
+ #     representation="",
+ #     paradigm="",
+ #     page_link="",
+ #     code_link="",
+ #     organization=""
+ # )
+
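
For reference, the registry above is just a module-level dict of named tuples keyed by nickname. A small sketch of how an entry could be added and queried; the `my-model` entry below is purely illustrative and not part of this commit.

```python
# Sketch: registering and querying an illustrative (hypothetical) entry.
from serve.model import register_model_config, get_model_config, model_config

register_model_config(
    nick_name="my-model",             # hypothetical nickname, for illustration only
    model_name="MyModel",
    task="Image-to-3D",
    representation="Mesh",
    page_link="https://example.com",  # placeholder URL
    organization="Example Lab",
)

cfg = get_model_config("my-model")
print(cfg.model_name, cfg.task, cfg.organization)

# All registered Text-to-3D nicknames:
print([k for k, v in model_config.items() if v.task == "Text-to-3D"])
```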
serve/tmp.py ADDED
@@ -0,0 +1,379 @@
+
+ """
+ Live monitor of the website statistics and leaderboard.
+ Dependency:
+ sudo apt install pkg-config libicu-dev
+ pip install pytz gradio gdown plotly polyglot pyicu pycld2 tabulate
+ """
+
+ import argparse
+ import ast
+ import pickle
+ import os
+ import threading
+ import time
+
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+
+
+ basic_component_values = [None] * 6
+ leader_component_values = [None] * 5
+
+ nam_dict = {
+     "dreamfusion": "DreamFusion",
+     "mvdream": "MVDream",
+     "lucid-dreamer": "LucidDreamer",
+     "magic3d": "Magic3D",
+     "grm-t": "GRM", "grm-i": "GRM", "grm": "GRM",
+     "latent-nerf": "Latent-NeRF",
+     "shap-e-t": "Shap-E", "shap-e-i": "Shap-E", "shap-e": "Shap-E",
+     "point-e-t": "Point-E", "point-e-i": "Point-E", "point-e": "Point-E",
+     "sjc": "SJC",
+     "wonder3d": "Wonder3D",
+     "openlrm": "OpenLRM",
+     "sz123": "Stable Zero123", "stable-zero123": "Stable Zero123",
+     "z123": "Zero123-XL", "zero123-xl": "Zero123-XL",
+     "magic123": "Magic123",
+     "lgm": "LGM",
+     "syncdreamer": "SyncDreamer",
+     "triplane-gaussian": "TriplaneGaussian",
+     "escher-net": "EscherNet",
+     "free3d": "Free3D",
+     "instant-mesh": "InstantMesh",
+ }
+
+ def replace_model_name(name, rank):
+     name = nam_dict[name]
+
+     if rank == 0:
+         return "🥇 " + name
+     elif rank == 1:
+         return "🥈 " + name
+     elif rank == 2:
+         return "🥉 " + name
+     else:
+         return name
+
+ # def make_leaderboard_md(elo_results):
+ #     leaderboard_md = f"""
+ #     # 🏆 Chatbot Arena Leaderboard
+ #     | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
+
+ #     This leaderboard is based on the following three benchmarks.
+ #     - [Chatbot Arena](https://lmsys.org/blog/2023-05-03-arena/) - a crowdsourced, randomized battle platform. We use 100K+ user votes to compute Elo ratings.
+ #     - [MT-Bench](https://arxiv.org/abs/2306.05685) - a set of challenging multi-turn questions. We use GPT-4 to grade the model responses.
+ #     - [MMLU](https://arxiv.org/abs/2009.03300) (5-shot) - a test to measure a model's multitask accuracy on 57 tasks.
+
+ #     💻 Code: The Arena Elo ratings are computed by this [notebook]({notebook_url}). The MT-bench scores (single-answer grading on a scale of 10) are computed by [fastchat.llm_judge](https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge). The MMLU scores are mostly computed by [InstructEval](https://github.com/declare-lab/instruct-eval). Higher values are better for all benchmarks. Empty cells mean not available. Last updated: November, 2023.
+ #     """
+ #     return leaderboard_md
+
+ def make_leaderboard_md(elo_results):
+     leaderboard_md = f"""
+ # 🏆 3DGen-Arena Leaderboard
+ """
+
+     return leaderboard_md
+
+ def make_leaderboard_md_live(elo_results):
+     leaderboard_md = f"""
+ # Leaderboard
+ Last updated: {elo_results["last_updated_datetime"]}
+ {elo_results["leaderboard_table"]}
+ """
+     return leaderboard_md
+
+
+ def model_hyperlink(model_name, link):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+ def load_leaderboard_table_csv(filename, add_hyperlink=True):
+     df = pd.read_csv(filename)
+     df = df.drop(df[df["Key"].isnull()].index)
+     for col in df.columns:
+         if "Elo rating" in col:
+             # print(col, df[col], type(df[col]), df[col] is not np.NaN)
+             df[col] = df[col].apply(lambda x: int(x) if (x != "-" and pd.notna(x)) else np.NaN)
+
+         if add_hyperlink and col == "Model":
+             df[col] = df.apply(lambda row: model_hyperlink(row[col], row["Link"]), axis=1)
+     return df
+
+
+
+ def build_basic_stats_tab():
+     empty = "Loading ..."
+     basic_component_values[:] = [empty, None, empty, empty, empty, empty]
+
+     md0 = gr.Markdown(empty)
+     gr.Markdown("#### Figure 1: Number of model calls and votes")
+     plot_1 = gr.Plot(show_label=False)
+     with gr.Row():
+         with gr.Column():
+             md1 = gr.Markdown(empty)
+         with gr.Column():
+             md2 = gr.Markdown(empty)
+     with gr.Row():
+         with gr.Column():
+             md3 = gr.Markdown(empty)
+         with gr.Column():
+             md4 = gr.Markdown(empty)
+     return [md0, plot_1, md1, md2, md3, md4]
+
+
+ def get_full_table(anony_arena_df, full_arena_df, model_table_df):
+     values = []
+     for i in range(len(model_table_df)):
+         row = []
+         model_key = model_table_df.iloc[i]["Key"]
+         model_name = model_table_df.iloc[i]["Model"]
+         # model display name
+         row.append(model_name)
+         if model_key in anony_arena_df.index:
+             idx = anony_arena_df.index.get_loc(model_key)
+             row.append(round(anony_arena_df.iloc[idx]["rating"]))
+         else:
+             row.append(np.nan)
+         if model_key in full_arena_df.index:
+             idx = full_arena_df.index.get_loc(model_key)
+             row.append(round(full_arena_df.iloc[idx]["rating"]))
+         else:
+             row.append(np.nan)
+         # row.append(model_table_df.iloc[i]["MT-bench (score)"])
+         # row.append(model_table_df.iloc[i]["Num Battles"])
+         # row.append(model_table_df.iloc[i]["MMLU"])
+         # Organization
+         row.append(model_table_df.iloc[i]["Organization"])
+         # license
+         row.append(model_table_df.iloc[i]["License"])
+
+         values.append(row)
+     values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)
+     return values
+
+
+ def get_arena_table(arena_dfs, model_table_df):
+     # sort by rating
+     # arena_df = arena_df.sort_values(by=["rating"], ascending=False)
+     values = []
+     for i in range(len(model_table_df)):
+         row = []
+         # model_key = arena_df.index[i]
+         # model_name = model_table_df[model_table_df["Key"] == model_key]["Model"].values[
+         #     0
+         # ]
+         model_name = model_table_df.iloc[i]["Key"]
+
+         # rank
+         row.append(i + 1)
+         # model display name
+         row.append(replace_model_name(model_name, i))
+         # elo rating
+         num_battles = 0
+         for dim in arena_dfs.keys():
+             # try:
+             #     print(arena_dfs[dim].loc[model_name])
+             # except:
+             #     continue
+             row.append(round(arena_dfs[dim].loc[model_name]["rating"], 2))
+             upper_diff = round(arena_dfs[dim].loc[model_name]["rating_q975"] - arena_dfs[dim].loc[model_name]["rating"])
+             lower_diff = round(arena_dfs[dim].loc[model_name]["rating"] - arena_dfs[dim].loc[model_name]["rating_q025"])
+             # row.append(f"+{upper_diff}/-{lower_diff}")
+             try:
+                 num_battles += round(arena_dfs[dim].loc[model_name]["num_battles"])
+             except:
+                 num_battles += 0
+         # row.append(round(arena_df.iloc[i]["rating"]))
+         # upper_diff = round(arena_df.iloc[i]["rating_q975"] - arena_df.iloc[i]["rating"])
+         # lower_diff = round(arena_df.iloc[i]["rating"] - arena_df.iloc[i]["rating_q025"])
+         # row.append(f"+{upper_diff}/-{lower_diff}")
+         row.append(round(model_table_df.iloc[i]["Arena Elo rating"], 2))
+         # num battles
+         # row.append(round(arena_df.iloc[i]["num_battles"]))
+         row.append(num_battles)
+         # Organization
+         # row.append(
+         #     model_table_df[model_table_df["Key"] == model_key]["Organization"].values[0]
+         # )
+         # # license
+         # row.append(
+         #     model_table_df[model_table_df["Key"] == model_key]["License"].values[0]
+         # )
+
+         values.append(row)
+     return values
+
+ def make_arena_leaderboard_md(elo_results):
+     total_votes = 0
+     for dim in elo_results.keys():
+         arena_df = elo_results[dim]["leaderboard_table_df"]
+         last_updated = elo_results[dim]["last_updated_datetime"]
+         total_votes += sum(arena_df["num_battles"].fillna(0)) // 2
+     total_models = len(arena_df)
+
+     leaderboard_md = f"""
+ Total #models: **{total_models}**. \n
+ Total #votes: **{int(total_votes)}** (Anonymous Votes only). \n
+ Last updated: {last_updated}. \n
+ Contribute the votes 🗳️ at [3DGen-Arena](https://huggingface.co/spaces/ZhangYuhan/3DGen-Arena)!
+ """
+     return leaderboard_md
+
+ def make_full_leaderboard_md(elo_results):
+     total_votes = 0
+     for dim in elo_results.keys():
+         arena_df = elo_results[dim]["leaderboard_table_df"]
+         last_updated = elo_results[dim]["last_updated_datetime"]
+         total_votes += sum(arena_df["num_battles"].fillna(0)) // 2
+     total_models = len(arena_df)
+
+     leaderboard_md = f"""
+ Total #models: **{total_models}**. \n
+ Total #votes: **{int(total_votes)}** (Anonymous + Named Votes). \n
+ Last updated: {last_updated}.\n
+ Contribute the votes 🗳️ at [3DGen-Arena](https://huggingface.co/spaces/ZhangYuhan/3DGen-Arena)!
+ """
+     return leaderboard_md
+
+ def build_empty_leaderboard_tab():
+     leaderboard_md = f"""
+ # 🗳️ Leaderboard
+ ## Look forward to your votes, and the leaderboard is coming soon!
+ """
+     gr.Markdown(leaderboard_md, elem_id="leaderboard_markdown")
+
+ def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=False):
+     if elo_results_file is None:  # Do live update
+         md = "Loading ..."
+         p1 = p2 = p3 = p4 = None
+     else:
+         with open(elo_results_file, "rb") as fin:
+             elo_results = pickle.load(fin)
+
+         # print(elo_results)
+         # print(elo_results.keys())
+         anony_elo_results, full_elo_results = {}, {}
+         anony_arena_dfs, full_arena_dfs = {}, {}
+         p1, p2, p3, p4 = {}, {}, {}, {}
+         for dim in elo_results.keys():
+             anony_elo_results[dim] = elo_results[dim]["anony"]
+             full_elo_results[dim] = elo_results[dim]["full"]
+             anony_arena_dfs[dim] = anony_elo_results[dim]["leaderboard_table_df"]
+             full_arena_dfs[dim] = full_elo_results[dim]["leaderboard_table_df"]
+             p1[dim] = anony_elo_results[dim]["win_fraction_heatmap"]
+             p2[dim] = anony_elo_results[dim]["battle_count_heatmap"]
+             p3[dim] = anony_elo_results[dim]["bootstrap_elo_rating"]
+             p4[dim] = anony_elo_results[dim]["average_win_rate_bar"]
+             print(anony_arena_dfs[dim])
+             print(full_arena_dfs[dim])
+
+         md = make_leaderboard_md(anony_elo_results)
+
+     md_1 = gr.Markdown(md, elem_id="leaderboard_markdown")
+
+     if leaderboard_table_file:
+         model_table_df = load_leaderboard_table_csv(leaderboard_table_file)
+         model_table_df_full = load_leaderboard_table_csv(str(leaderboard_table_file).replace('.csv', '_full.csv'))
+         with gr.Tabs() as tabs:
+             # arena table
+             arena_table_vals = get_arena_table(anony_arena_dfs, model_table_df)
+             with gr.Tab("Anony. Arena", id=0):
+                 md = make_arena_leaderboard_md(anony_elo_results)
+                 gr.Markdown(md, elem_id="leaderboard_markdown")
+                 gr.Dataframe(
+                     # headers=[
+                     #     "Rank",
+                     #     "🤖 Model",
+                     #     "⭐ Arena Elo",
+                     #     "📊 95% CI",
+                     #     "🗳️ Votes",
+                     #     "Organization",
+                     #     "License",
+                     # ],
+                     headers=["Rank", "🤖 Model"] + [f"📈 {dim} Elo" for dim in anony_arena_dfs.keys()] + ["⭐ Avg. Arena Elo Ranking", "📮 Votes"],
+                     datatype=[
+                         "str",
+                         "markdown",
+                         "number",
+                         "number",
+                         "number",
+                         "number",
+                         "number",
+                         "number",
+                         "number"
+                     ],
+                     value=arena_table_vals,
+                     # value=model_table_df,
+                     elem_id="arena_leaderboard_dataframe",
+                     height=700,
+                     column_widths=[50, 200, 100, 100, 100, 100, 100, 100, 100],
+                     wrap=True,
+                 )
+             with gr.Tab("Full Arena", id=1):
+                 md = make_full_leaderboard_md(full_elo_results)
+                 gr.Markdown(md, elem_id="leaderboard_markdown")
+                 full_table_vals = get_arena_table(full_arena_dfs, model_table_df_full)
+                 gr.Dataframe(
+                     headers=["Rank", "🤖 Model"] + [f"📈 {dim} Elo" for dim in anony_arena_dfs.keys()] + ["⭐ Avg. Arena Elo Ranking", "📮 Votes"],
+                     datatype=[
+                         "str",
+                         "markdown",
+                         "number",
+                         "number",
+                         "number",
+                         "number",
+                         "number",
+                         "number",
+                         "number"
+                     ],
+                     value=full_table_vals,
+                     elem_id="full_leaderboard_dataframe",
+                     column_widths=[50, 200, 100, 100, 100, 100, 100, 100, 100],
+                     height=700,
+                     wrap=True,
+                 )
+         if not show_plot:
+             gr.Markdown(
+                 """ ## We are still collecting more votes on more models. The ranking will be updated very frequently. Please stay tuned!
+                 """,
+                 elem_id="leaderboard_markdown",
+             )
+     else:
+         pass
+
+     # leader_component_values[:] = [md, p1, p2, p3, p4]
+
+     """
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(
+                 "#### Figure 1: Fraction of Model A Wins for All Non-tied A vs. B Battles"
+             )
+             plot_1 = gr.Plot(p1, show_label=False)
+         with gr.Column():
+             gr.Markdown(
+                 "#### Figure 2: Battle Count for Each Combination of Models (without Ties)"
+             )
+             plot_2 = gr.Plot(p2, show_label=False)
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(
+                 "#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)"
+             )
+             plot_3 = gr.Plot(p3, show_label=False)
+         with gr.Column():
+             gr.Markdown(
+                 "#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)"
+             )
+             plot_4 = gr.Plot(p4, show_label=False)
+     """
+
+     from .utils import acknowledgment_md
+
+     gr.Markdown(acknowledgment_md)
+
+     # return [md_1, plot_1, plot_2, plot_3, plot_4]
+     return [md_1]