home_icon = "https://img.shields.io/badge/Website-Page-blue?logo=homeassistant&logoColor=white&style=flat-square"
arxiv_icon = "https://img.shields.io/badge/ArXiv-Paper-b31b1b?logo=arxiv&logoColor=white&style=flat-square"
github_icon = "https://img.shields.io/badge/GitHub-Repo-181717?logo=github&logoColor=white&style=flat-square"


def hyperlink(name, link):
    # Render a model name as an HTML anchor pointing at its project/code page.
    return f'<a href="{link}">{name}</a>'


def get_title_md():
    md = '''
# 🏆 Leaderboard for 3D Generative Models
'''
    return md


def get_intro_md():
    md = '''
This leaderboard provides a centralized platform for evaluating and tracking the performance of 3D generation models.
'''
    return md


def get_model_intro_md():
    md = '''
This leaderboard spans a diverse set of state-of-the-art 3D generation models, covering different conditioning settings such as images, text, or combinations thereof.
'''
    return md


def get_model_description_md(model_config, cols=10, except_models=()):
    # Group model hyperlinks by task, preferring the project page over the code link.
    model_list = {}
    for cfg in model_config.values():
        task = cfg.task
        model_name = cfg.model_name
        model_link = cfg.page_link if cfg.page_link else cfg.code_link
        if task not in model_list:
            model_list[task] = set()
        if model_name not in except_models:
            model_list[task].add(hyperlink(model_name, model_link))

    model_descriptions = ""
    for task, models in model_list.items():
        model_descriptions += f"\n**{len(models)} {task} Generative Models**\n"
        ## model_table
        model_descriptions += '<table>\n'
        for i, model in enumerate(models):
            if i % cols == 0:
                model_descriptions += '  <tr>\n'
            model_descriptions += f'    <td>{model}</td>\n'
            if (i + 1) % cols == 0:
                model_descriptions += '  </tr>\n'
        # Pad the last row with empty cells so every row has `cols` columns.
        if len(models) % cols != 0:
            num_pad = cols - len(models) % cols
            model_descriptions += '    <td></td>\n' * num_pad
            model_descriptions += '  </tr>\n'
        model_descriptions += '</table>\n'
    return model_descriptions.strip()
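

# Usage sketch (assumption): `model_config` maps keys to entries exposing
# `.task`, `.model_name`, `.page_link`, and `.code_link`. The SimpleNamespace
# layout and the demo values below are hypothetical, for illustration only.
def _demo_model_description_md():
    from types import SimpleNamespace

    demo_config = {
        "model_a": SimpleNamespace(task="Image-to-3D", model_name="ModelA",
                                   page_link="https://example.com/model-a",
                                   code_link=None),
    }
    # Renders one task heading followed by an HTML table of hyperlinked names.
    return get_model_description_md(demo_config, cols=10)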


def get_object_dimension_intro_md():
    md = '''
Each model is evaluated under consistent, standardized settings and assessed along **multiple evaluation dimensions** to provide a detailed view of its strengths and limitations:
'''
    return md


def get_object_dimension_description_md():
    md = '''
- **Geometry Plausibility** assesses the structural integrity and physical feasibility of the generated shape.
- **Geometry Details** reflects the fidelity of fine-scale structures, such as sharp edges and part boundaries.
- **Texture Quality** evaluates the visual fidelity of surface textures in terms of resolution, realism, and aesthetic consistency.
- **Geometry-Texture Coherency** assesses the alignment between texture and shape: whether textures follow the contours, part boundaries, and material semantics of the geometry.
- **Prompt-3D Alignment** evaluates the semantic and/or identity consistency between the input prompt and the generated 3D asset.
'''
    return md


def get_leaderboard_intro_md():
    md = '''
This leaderboard integrates results from three complementary benchmarks that span different aspects of 3D synthesis.

- [Hi3DEval]()
- [3DGenBench](https://zyh482.github.io/3DGen-Bench/)
- [GPTEval3D](https://github.com/3DTopia/GPTEval3D)
'''
    return md


def get_hi3deval_intro_md():
    md = '''
This leaderboard is evaluated using **Hi3DEval**, a straightforward scoring benchmark that does **not rely on pairwise comparisons**.
Specifically, each dimension is assigned an absolute score within a clearly defined value range:

- Geometry Plausibility: range [0, 9]
- Geometry Details: range [0, 4]
- Texture Quality: range [0, 4]
- Geometry-Texture Coherency: range [0, 1]
- Prompt-3D Alignment: range [0, 4]

The **Overall Score** is computed as the **SUM** of the scores across all five dimensions.

Hi3DEval supports unified evaluation for both **Text-to-3D** and **Image-to-3D** generation tasks.
You can also freely select **"Task"** to explore performance under different input modalities.
'''
    return md


def get_citation_md(name):
    citations = {
        "hi3deval": '''
```bibtex
@article
```
''',
        "3dgen-bench": '''
```bibtex
@article{zhang20253dgen,
  title={3DGen-Bench: Comprehensive Benchmark Suite for 3D Generative Models},
  author={Zhang, Yuhan and Zhang, Mengchen and Wu, Tong and Wang, Tengfei and Wetzstein, Gordon and Lin, Dahua and Liu, Ziwei},
  journal={arXiv preprint arXiv:2503.21745},
  year={2025}
}
```
''',
        "gpteval3d": '''
```bibtex
@inproceedings{wu2024gpt,
  title={GPT-4V(ision) is a Human-Aligned Evaluator for Text-to-3D Generation},
  author={Wu, Tong and Yang, Guandao and Li, Zhibing and Zhang, Kai and Liu, Ziwei and Guibas, Leonidas and Lin, Dahua and Wetzstein, Gordon},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={22227--22238},
  year={2024}
}
```
''',
    }
    md = f"Reference:\n{citations[name.lower()]}"
    return md
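

# A minimal sketch (assumption) of the Hi3DEval "Overall Score" described in
# `get_hi3deval_intro_md`: the plain SUM of the five dimension scores, each
# within its stated range. The dict layout, the clamping step, and the name
# `compute_overall_score` are hypothetical, added for illustration only.
HI3DEVAL_SCORE_RANGES = {
    "Geometry Plausibility": (0, 9),
    "Geometry Details": (0, 4),
    "Texture Quality": (0, 4),
    "Geometry-Texture Coherency": (0, 1),
    "Prompt-3D Alignment": (0, 4),
}


def compute_overall_score(scores):
    """Sum the five Hi3DEval dimension scores, clamping each to its range."""
    total = 0.0
    for dim, (low, high) in HI3DEVAL_SCORE_RANGES.items():
        total += min(max(scores[dim], low), high)
    return total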