Upload folder using huggingface_hub
.DS_Store
ADDED
Binary file (6.15 kB)
hkmmlu.py
ADDED
@@ -0,0 +1,56 @@
import os
import datasets
import pandas as pd

# Zip archive containing one CSV per subject under test/ and dev/.
_URL = r"https://huggingface.co/datasets/chuxuecao/hkmmlu/resolve/main/hkmmlu.zip"

# One config per subject; each name maps to test/<name>.csv and dev/<name>.csv.
task_list = ['basic_medical_science', 'business_management', 'chinese_conditions', 'chinese_history', 'chinese_world_culture', 'chinese_world_geography', 'clinical_psychology', 'college_math', 'culture_commonsense', 'dentistry', 'economics', 'educational_psychology', 'financial_analysis', 'fire_science', 'hk_celebrity', 'hk_cultural_art', 'hk_current_affairs', 'hk_education', 'hk_entertainment_industry_management', 'hk_events_festivals', 'hk_film_television', 'hk_government_political_system', 'hk_historical_development_geography', 'hk_history', 'hk_human_geography', 'hk_law', 'hk_life_knowledge', 'hk_literature', 'hk_livelihood', 'hk_music', 'hk_party_politics', 'hk_physical_geography', 'hk_politicians_events', 'hk_society', 'hk_transport_infrastructure', 'hk_transportation', 'hk_urban_regional_development', 'hkdse_biology', 'hkdse_business_accounting_finance', 'hkdse_chemistry', 'hkdse_economics', 'hkdse_geography', 'hkdse_information_and_communication_technology', 'hkdse_mathematics', 'hkdse_physics', 'hkdse_tourism', 'human_behavior', 'insurance_studies', 'logic_reasoning', 'management_accounting', 'marketing_management', 'mechanical', 'nautical_science', 'optometry', 'organic_chemistry', 'pharmacology', 'pharmacy', 'physics', 'science_technology_commonsense', 'social_commonsense', 'statistics_and_machine_learning', 'technical', 'traditional_chinese_medicine_clinical_medicine', 'veterinary_pathology', 'veterinary_pharmacology', 'world_entertainment']


class HKMMLUConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)


class HKMMLU(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        HKMMLUConfig(name=task_name) for task_name in task_list
    ]

    def _info(self):
        features = datasets.Features(
            {
                "Question": datasets.Value("string"),
                "A": datasets.Value("string"),
                "B": datasets.Value("string"),
                "C": datasets.Value("string"),
                "D": datasets.Value("string"),
                "Answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        task_name = self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("dev"),
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        # Each CSV row becomes one example with the features declared in _info().
        df = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8")
        for i, instance in enumerate(df.to_dict(orient="records")):
            yield i, instance
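
The script above is what `datasets.load_dataset` executes when this repository is passed as the dataset path together with one of the config names in `task_list`. A minimal usage sketch, using `hk_law` as an example subject (depending on your `datasets` version you may additionally need `trust_remote_code=True` for script-based datasets):

```python
from datasets import load_dataset

# One subject per config; the script builds "test" and "dev" splits.
ds = load_dataset("chuxuecao/hkmmlu", "hk_law")

example = ds["test"][0]
# Fields match the features declared in _info().
print(example["Question"])
print(example["A"], example["B"], example["C"], example["D"])
print("gold answer:", example["Answer"])
print(len(ds["dev"]), "dev examples")
```
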
hkmmlu.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3bd8a8a881c131e8caf7211cd9daca1e333afd9bf3a5623eec38cab724f5887
size 1814978
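
The three lines above are only a Git LFS pointer; the actual archive (about 1.8 MB) lives in LFS storage. A small sketch, assuming `huggingface_hub` is installed, of fetching the zip and checking that it contains the `test/` and `dev/` CSVs the loading script expects:

```python
import zipfile

from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the real archive.
path = hf_hub_download(repo_id="chuxuecao/hkmmlu", filename="hkmmlu.zip", repo_type="dataset")

with zipfile.ZipFile(path) as zf:
    names = zf.namelist()
    print(sum(n.startswith("test/") for n in names), "test files")
    print(sum(n.startswith("dev/") for n in names), "dev files")
```
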
readme.md
ADDED
@@ -0,0 +1,74 @@
# HKMMLU

This repo contains the inference code for the paper "Measuring Hong Kong Massive Multi-Task Language Understanding".

## Introduction

We introduce HKMMLU, a multi-task language understanding benchmark that evaluates both Hong Kong linguistic competence and socio-cultural knowledge. HKMMLU includes 26,698 multiple-choice questions across 66 subjects, organized into four categories: Science, Technology, Engineering, and Mathematics (STEM); Social Sciences; Humanities; and Other. To evaluate the multilingual understanding ability of LLMs, it additionally comprises 90,550 Mandarin-Cantonese translation tasks. We conduct comprehensive experiments on GPT-4o, Claude 3.7 Sonnet, and 18 open-source models of varying sizes on HKMMLU.

The results show that the best-performing model, DeepSeek-V3, struggles to achieve an accuracy of 75%, significantly lower than its accuracy on MMLU and CMMLU. This underscores the need for further research to enhance LLMs' language ability and knowledge specific to Hong Kong. Furthermore, we investigate how question language, model size, prompting strategy, and question and reasoning token lengths affect model performance. We anticipate that HKMMLU will significantly advance the development of LLMs in multilingual and cross-cultural contexts, enabling broader and more impactful applications.

## Evaluation

To run local inference, modify the configuration as needed, then run:

```bash
cd hkmmlu/scripts/
bash ./run_inference.sh
```

To use the API for inference, run:

```bash
cd hkmmlu
python inference/api_inference.py --config_file configs/inference/api/gpt-4o.yaml
```

## 🏆 Mini-Leaderboard

| Model                       | Overall Accuracy |
| --------------------------- | :--------------: |
| DeepSeek-V3                 |       74.8       |
| GPT-4o                      |       70.3       |
| Qwen2.5-72B-Instruct        |       69.1       |
| Llama-3-Taiwan-70B-Instruct |       67.4       |
| Claude 3.7 Sonnet           |       66.7       |
| Qwen2.5-14B-Instruct        |       62.3       |
| Yi-1.5-34B-Chat             |       60.6       |
| Mistral-Large-Instruct      |       60.0       |
| Llama-3-70B-Instruct        |       59.2       |
| Llama-3.1-70B-Instruct      |       58.7       |
| Gemma 2 IT 27B              |       57.4       |
| Qwen2.5-7B-Instruct         |       57.2       |
| Yi-1.5-9B-Chat              |       52.6       |
| Llama-3-Taiwan-8B-Instruct  |       51.9       |
| Qwen2.5-3B-Instruct         |       50.3       |
| GLM-4-9B-Chat               |       49.0       |
| Mistral-Small-Instruct      |       45.2       |
| Llama-3-8B-Instruct         |       45.1       |
| Gemma 2 IT 2B               |       42.0       |
| Llama-3.1-8B-Instruct       |       40.0       |

#### Load data

```python
from datasets import load_dataset

hkmmlu = load_dataset("chuxuecao/hkmmlu", "hk_film_television")
print(hkmmlu['test'][0])
```

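Each record can be assembled into a standard multiple-choice prompt from its fields; a minimal illustrative sketch (this template is an assumption for demonstration, not necessarily the prompt used in the paper):

```python
def format_prompt(example):
    # Hypothetical zero-shot template built from the dataset fields.
    return (
        f"{example['Question']}\n"
        f"A. {example['A']}\n"
        f"B. {example['B']}\n"
        f"C. {example['C']}\n"
        f"D. {example['D']}\n"
        "Answer:"
    )

print(format_prompt(hkmmlu['test'][0]))  # the gold label is hkmmlu['test'][0]['Answer']
```
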
#### Load all data at once
```python
task_list = ['basic_medical_science', 'business_management', 'chinese_conditions', 'chinese_history', 'chinese_world_culture', 'chinese_world_geography', 'clinical_psychology', 'college_math', 'culture_commonsense', 'dentistry', 'economics', 'educational_psychology', 'financial_analysis', 'fire_science', 'hk_celebrity', 'hk_cultural_art', 'hk_current_affairs', 'hk_education', 'hk_entertainment_industry_management', 'hk_events_festivals', 'hk_film_television', 'hk_government_political_system', 'hk_historical_development_geography', 'hk_history', 'hk_human_geography', 'hk_law', 'hk_life_knowledge', 'hk_literature', 'hk_livelihood', 'hk_music', 'hk_party_politics', 'hk_physical_geography', 'hk_politicians_events', 'hk_society', 'hk_transport_infrastructure', 'hk_transportation', 'hk_urban_regional_development', 'hkdse_biology', 'hkdse_business_accounting_finance', 'hkdse_chemistry', 'hkdse_economics', 'hkdse_geography', 'hkdse_information_and_communication_technology', 'hkdse_mathematics', 'hkdse_physics', 'hkdse_tourism', 'human_behavior', 'insurance_studies', 'logic_reasoning', 'management_accounting', 'marketing_management', 'mechanical', 'nautical_science', 'optometry', 'organic_chemistry', 'pharmacology', 'pharmacy', 'physics', 'science_technology_commonsense', 'social_commonsense', 'statistics_and_machine_learning', 'technical', 'traditional_chinese_medicine_clinical_medicine', 'veterinary_pathology', 'veterinary_pharmacology', 'world_entertainment']
from datasets import load_dataset
hkmmlu = {task: load_dataset("chuxuecao/hkmmlu", task) for task in task_list}
```

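With every subject loaded, split sizes can be checked directly; a short sketch:

```python
# hkmmlu is the dict of per-subject DatasetDicts built above.
total_test = sum(len(ds["test"]) for ds in hkmmlu.values())
total_dev = sum(len(ds["dev"]) for ds in hkmmlu.values())
print(f"{len(hkmmlu)} subjects, {total_test} test questions, {total_dev} dev questions")
```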