root committed on
Commit 6dd5c50 · 1 Parent(s): bb8fd46

modified for v2

Files changed (1)
  1. app.py +468 -185
app.py CHANGED
@@ -1,204 +1,487 @@
  import gradio as gr
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
- import pandas as pd
  from apscheduler.schedulers.background import BackgroundScheduler
- from huggingface_hub import snapshot_download
-
- from src.about import (
-     CITATION_BUTTON_LABEL,
-     CITATION_BUTTON_TEXT,
-     EVALUATION_QUEUE_TEXT,
-     INTRODUCTION_TEXT,
-     LLM_BENCHMARKS_TEXT,
-     TITLE,
- )
- from src.display.css_html_js import custom_css
- from src.display.utils import (
-     BENCHMARK_COLS,
-     COLS,
-     EVAL_COLS,
-     EVAL_TYPES,
-     AutoEvalColumn,
-     ModelType,
-     fields,
-     WeightType,
-     Precision
- )
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
- from src.submission.submit import add_new_eval


  def restart_space():
-     API.restart_space(repo_id=REPO_ID)
-
- ### Space initialisation
- try:
-     print(EVAL_REQUESTS_PATH)
-     snapshot_download(
-         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-     )
- except Exception:
-     restart_space()
- try:
-     print(EVAL_RESULTS_PATH)
-     snapshot_download(
-         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-     )
- except Exception:
-     restart_space()
-
-
- LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-
- (
-     finished_eval_queue_df,
-     running_eval_queue_df,
-     pending_eval_queue_df,
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
-
- def init_leaderboard(dataframe):
-     if dataframe is None or dataframe.empty:
-         raise ValueError("Leaderboard DataFrame is empty or None.")
-     return Leaderboard(
-         value=dataframe,
-         datatype=[c.type for c in fields(AutoEvalColumn)],
-         select_columns=SelectColumns(
-             default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-             label="Select Columns to Display:",
-         ),
-         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-         filter_columns=[
-             ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-             ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-             ColumnFilter(
-                 AutoEvalColumn.params.name,
-                 type="slider",
-                 min=0.01,
-                 max=150,
-                 label="Select the number of parameters (B)",
-             ),
-             ColumnFilter(
-                 AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-             ),
-         ],
-         bool_checkboxgroup_label="Hide models",
-         interactive=False,
-     )
-
-
- demo = gr.Blocks(css=custom_css)
- with demo:
-     gr.HTML(TITLE)
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

      with gr.Tabs(elem_classes="tab-buttons") as tabs:
-         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
-             leaderboard = init_leaderboard(LEADERBOARD_DF)
-
-         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-             with gr.Column():
-                 with gr.Row():
-                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                 with gr.Column():
-                     with gr.Accordion(
-                         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             finished_eval_table = gr.components.Dataframe(
-                                 value=finished_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
-                     with gr.Accordion(
-                         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             running_eval_table = gr.components.Dataframe(
-                                 value=running_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
-
-                     with gr.Accordion(
-                         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             pending_eval_table = gr.components.Dataframe(
-                                 value=pending_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
              with gr.Row():
-                 gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

              with gr.Row():
-                 with gr.Column():
-                     model_name_textbox = gr.Textbox(label="Model name")
-                     revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                     model_type = gr.Dropdown(
-                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                         label="Model type",
-                         multiselect=False,
-                         value=None,
-                         interactive=True,
-                     )
-
-                 with gr.Column():
-                     precision = gr.Dropdown(
-                         choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                         label="Precision",
-                         multiselect=False,
-                         value="float16",
-                         interactive=True,
-                     )
-                     weight_type = gr.Dropdown(
-                         choices=[i.value.name for i in WeightType],
-                         label="Weights type",
-                         multiselect=False,
-                         value="Original",
-                         interactive=True,
-                     )
-                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-             submit_button = gr.Button("Submit Eval")
-             submission_result = gr.Markdown()
-             submit_button.click(
-                 add_new_eval,
-                 [
-                     model_name_textbox,
-                     base_model_name_textbox,
-                     revision_name_textbox,
-                     precision,
-                     weight_type,
-                     model_type,
-                 ],
-                 submission_result,
-             )

      with gr.Row():
-         with gr.Accordion("📙 Citation", open=False):
              citation_button = gr.Textbox(
-                 value=CITATION_BUTTON_TEXT,
-                 label=CITATION_BUTTON_LABEL,
-                 lines=20,
                  elem_id="citation-button",
                  show_copy_button=True,
              )

  scheduler = BackgroundScheduler()
- scheduler.add_job(restart_space, "interval", seconds=1800)
  scheduler.start()
- demo.queue(default_concurrency_limit=40).launch()

  import gradio as gr
+ import os
+ from huggingface_hub import HfApi, snapshot_download
  from apscheduler.schedulers.background import BackgroundScheduler
+ from datasets import load_dataset
+ from src.utils import load_all_data
+ from src.md import ABOUT_TEXT, TOP_TEXT
+ from src.plt import plot_avg_correlation
+ from src.constants import subset_mapping, length_categories, example_counts
+ from src.css import custom_css
+ import numpy as np
+
+ api = HfApi()

+ COLLAB_TOKEN = os.environ.get("COLLAB_TOKEN")
+ evals_repo = "allenai/reward-bench-v2-results"
+
+ eval_set_repo = "allenai/reward-bench-v2"
+ repo_dir_rewardbench = "./evals/rewardbench/"

  def restart_space():
+     api.restart_space(repo_id="allenai/reward-bench-v2", token=COLLAB_TOKEN)
+
+ print("Pulling evaluation results")
+ repo = snapshot_download(
+     local_dir=repo_dir_rewardbench,
+     ignore_patterns=["pref-sets-scores/*", "eval-set-scores/*"],
+     repo_id=evals_repo,
+     use_auth_token=COLLAB_TOKEN,
+     tqdm_class=None,
+     etag_timeout=30,
+     repo_type="dataset",
+ )
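+ # The snapshot now lives in repo_dir_rewardbench; the per-prompt score folders
+ # ("pref-sets-scores/", "eval-set-scores/") are skipped via ignore_patterns, presumably because
+ # only the aggregated per-model results are needed for the tables built below.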
+
+ def avg_over_rewardbench_v2(dataframe_core):
+     """
+     Averages over the four RewardBench v2 domains and returns a dataframe with only those columns.
+
+     The domains are factuality, coconot/safety, math, and precise instruction following.
+     Missing domain scores are masked out of the average rather than treated as zeros.
+     """
+     domain_cols = ['factuality', 'coconot/safety', 'math', 'precise instruction following']
+     domain_weights = [1, 1, 1, 1]  # per-domain weights (equal weighting assumed; adjust if domains should count differently)
+     new_df = dataframe_core.copy()
+
+     # Get the domain data and handle missing values
+     domain_data = new_df[domain_cols].values
+     masked_data = np.ma.masked_array(domain_data, np.isnan(domain_data))
+
+     # Calculate weighted average
+     average = np.ma.average(masked_data, axis=1, weights=domain_weights)
+     new_df["average"] = average.filled(np.nan)
+
+     # Rearrange columns for consistent output
+     keep_columns = ["model", "model_type", "average"] + domain_cols
+     new_df = new_df[keep_columns]
+
+     return new_df
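+ # Worked example (equal weights): domain scores 0.80, 0.90, 0.60, 0.70 give
+ # average = (0.80 + 0.90 + 0.60 + 0.70) / 4 = 0.75; if one domain is NaN it is masked out,
+ # e.g. (0.80 + 0.90 + 0.70) / 3 = 0.80, rather than counting the gap as zero.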
+
+ def avg_over_rewardbench(dataframe_core, dataframe_prefs):
+     """
+     Averages over the subsets alpacaeval, mt-bench, llmbar, refusals, hep and returns dataframe with only these columns.
+
+     We average over 4 core sections (per prompt weighting):
+     1. Chat: Includes the easy chat subsets (alpacaeval-easy, alpacaeval-length, alpacaeval-hard, mt-bench-easy, mt-bench-medium)
+     2. Chat Hard: Includes the hard chat subsets (mt-bench-hard, llmbar-natural, llmbar-adver-neighbor, llmbar-adver-GPTInst, llmbar-adver-GPTOut, llmbar-adver-manual)
+     3. Safety: Includes the safety subsets (refusals-dangerous, refusals-offensive, xstest-should-refuse, xstest-should-respond, do not answer)
+     4. Reasoning: Includes the code and math subsets (math-prm, hep-cpp, hep-go, hep-java, hep-js, hep-python, hep-rust)
+     5. Prior Sets (0.5 weight): Includes the test sets (anthropic_helpful, mtbench_human, shp, summarize)
+     """
+     new_df = dataframe_core.copy()
+     dataframe_prefs = dataframe_prefs.copy()
+
+     # for main subsets, keys in subset_mapping, take the weighted avg by example_counts and store for the models
+     for subset, sub_subsets in subset_mapping.items():
+         subset_cols = [col for col in new_df.columns if col in sub_subsets]
+         sub_data = new_df[subset_cols].values  # take the relevant column values
+         sub_counts = [example_counts[s] for s in subset_cols]  # take the example counts
+         new_df[subset] = np.average(sub_data, axis=1, weights=sub_counts)  # take the weighted average
+         # new_df[subset] = np.round(np.nanmean(new_df[subset_cols].values, axis=1), 2)
+
+     data_cols = list(subset_mapping.keys())
+     keep_columns = ["model",] + ["model_type"] + data_cols
+     # keep_columns = ["model", "average"] + subsets
+     new_df = new_df[keep_columns]
+
+     # selected average from pref_sets
+     pref_columns = ["anthropic_helpful", "anthropic_hhh", "shp", "summarize"]
+     pref_data = dataframe_prefs[pref_columns].values
+
+     # add column test sets knowing the rows are not identical, take superset
+     dataframe_prefs["Prior Sets (0.5 weight)"] = np.nanmean(pref_data, axis=1)
+
+     # add column Test Sets empty to new_df
+     new_df["Prior Sets (0.5 weight)"] = np.nan
+     # per row in new_df if model is in dataframe_prefs, add the value to new_df["Prior Sets (0.5 weight)"]
+     values = []
+     for i, row in new_df.iterrows():
+         model = row["model"]
+         if model in dataframe_prefs["model"].values:
+             values.append(dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0])
+             # new_df.at[i, "Prior Sets (0.5 weight)"] = dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0]
+         else:
+             values.append(np.nan)
+
+     new_df["Prior Sets (0.5 weight)"] = values
+
+     # add total average
+     data_cols += ["Prior Sets (0.5 weight)"]
+     final_data = new_df[data_cols].values
+     masked_data = np.ma.masked_array(final_data, np.isnan(final_data))
+     weights = [2, 2, 2, 2, 1]
+     average = np.ma.average(masked_data, axis=1, weights=weights)
+     new_df["average"] = average.filled(np.nan)
+     # new_df["average"] = np.nanmean(new_df[data_cols].values, axis=1)
+
+     # make average third column
+     keep_columns = ["model", "model_type", "average"] + data_cols
+     new_df = new_df[keep_columns]
+     return new_df
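+ # Worked example of the [2, 2, 2, 2, 1] weighting: Chat=0.90, Chat Hard=0.50, Safety=0.80,
+ # Reasoning=0.70, Prior Sets=0.60 gives (2*0.90 + 2*0.50 + 2*0.80 + 2*0.70 + 0.60) / 9 ≈ 0.71,
+ # so the prior test sets count for half as much as each core section.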
+
+ def expand_subsets(dataframe):
+     # TODO need to modify data/ script to do this
+     pass
+
+
+ def length_bias_check(dataframe):
+     """
+     Takes the raw rewardbench dataframe and splits the data into new buckets according to length_categories.
+     Then, take the average of the three buckets as "average"
+     """
+     new_df = dataframe.copy()
+     existing_subsets = new_df.columns[3:]  # model, model_type, average
+     final_subsets = ["Length Bias", "Neutral", "Terse Bias"]
+     # new data is empty list dict for each final subset
+     new_data = {s: [] for s in final_subsets}
+
+     # now, subsets correspond to those with True, Neutral, and False length bias
+     # check if length_categories[subset] == "True" or "False" or "Neutral"
+     for subset in existing_subsets:
+         subset_data = new_df[subset].values
+         subset_length = length_categories[subset]
+         # route to the correct bucket
+         if subset_length == "True":
+             new_data["Length Bias"].append(subset_data)
+         elif subset_length == "Neutral":
+             new_data["Neutral"].append(subset_data)
+         elif subset_length == "False":
+             new_data["Terse Bias"].append(subset_data)
+
+     # take average of new_data and add to new_df (removing other columns than model)
+     for subset in final_subsets:
+         new_df[subset] = np.nanmean(new_data[subset], axis=0)
+     keep_columns = ["model"] + final_subsets
+     new_df = new_df[keep_columns]
+     # recompute average
+     # new_df["average"] = np.round(np.nanmean(new_df[final_subsets].values, axis=1), 2)
+
+     return new_df
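+ # Example: if length_categories maps, say, "alpacaeval-length" to "True" and "llmbar-natural"
+ # to "Neutral", their scores are routed to the "Length Bias" and "Neutral" columns respectively;
+ # each output column is the nan-ignoring mean over the subsets routed into that bucket.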
+
+
+
+ rewardbench_data = load_all_data(repo_dir_rewardbench, subdir="eval-set").sort_values(by='average', ascending=False)
+ rewardbench_data_length = length_bias_check(rewardbench_data).sort_values(by='Terse Bias', ascending=False)
+ prefs_data = load_all_data(repo_dir_rewardbench, subdir="pref-sets").sort_values(by='average', ascending=False)
+ # prefs_data_sub = expand_subsets(prefs_data).sort_values(by='average', ascending=False)
+
+ rewardbench_data_avg = avg_over_rewardbench_v2(rewardbench_data).sort_values(by='average', ascending=False)
+
+ def prep_df(df):
+     # add column to 0th entry with count (column name itself empty)
+     df.insert(0, '', range(1, 1 + len(df)))
+
+     # replace "model" with "Model", "model_type" with "Model Type", and "average" with "Average"
+     df = df.rename(columns={"model": "Model", "model_type": "Model Type", "average": "Average"})
+
+     if "Model Type" in df.columns:
+         # get model_types that have generative in them
+         mask = df["Model Type"].str.contains("generative", case=False, na=False)
+
+         # set these values to "Generative"
+         df.loc[mask, "Model Type"] = "Generative"
+
+     return df
+
+ # add count column to all dataframes
+ rewardbench_data = prep_df(rewardbench_data)
+ rewardbench_data_avg = prep_df(rewardbench_data_avg).rename(columns={"Average": "Score"})
+ # adjust weight of this average to 50% for Prior Sets (0.5 weight), 1 for others
+
+ rewardbench_data_length = prep_df(rewardbench_data_length)
+ prefs_data = prep_df(prefs_data)
+
+ col_types_rewardbench = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data.columns) - 1)
+ col_types_rewardbench_avg = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data_avg.columns) - 1)
+ cols_rewardbench_data_length = ["markdown"] + ["number"] * (len(rewardbench_data_length.columns) - 1)
+ col_types_prefs = ["number"] + ["markdown"] + ["number"] * (len(prefs_data.columns) - 1)
+ # col_types_prefs_sub = ["markdown"] + ["number"] * (len(prefs_data_sub.columns) - 1)

+ # for showing random samples
+ eval_set = load_dataset(eval_set_repo, use_auth_token=COLLAB_TOKEN, split="filtered")
+ def random_sample(r: gr.Request, subset):
+     if subset is None or subset == []:
+         sample_index = np.random.randint(0, len(eval_set) - 1)
+         sample = eval_set[sample_index]
+     else:  # filter by subsets (can be list)
+         if isinstance(subset, str):
+             subset = [subset]
+         # filter down dataset to only include the subset(s)
+         eval_set_filtered = eval_set.filter(lambda x: x["subset"] in subset)
+         sample_index = np.random.randint(0, len(eval_set_filtered) - 1)
+         sample = eval_set_filtered[sample_index]
+
+     markdown_text = '\n\n'.join([f"**{key}**:\n\n{value}" for key, value in sample.items()])
+     return markdown_text
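+ # Example: random_sample(request, ["math"]) filters the eval set to the "math" subset (if such a
+ # subset exists) before drawing one sample; passing None or [] samples from the full eval set.
+ # Every field of the drawn sample is rendered as a "**key**: value" markdown block.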
+
+ subsets = eval_set.unique("subset")
+
+ color_map = {
+     "Generative": "#7497db",
+     "Custom Classifier": "#E8ECF2",
+     "Seq. Classifier": "#ffcd75",
+     "DPO": "#75809c",
+ }
+ def color_model_type_column(df, color_map):
+     """
+     Apply color to the 'Model Type' column of the DataFrame based on a given color mapping.
+
+     Parameters:
+     df (pd.DataFrame): The DataFrame containing the 'Model Type' column.
+     color_map (dict): A dictionary mapping model types to colors.
+
+     Returns:
+     pd.Styler: The styled DataFrame.
+     """
+     # Function to apply color based on the model type
+     def apply_color(val):
+         color = color_map.get(val, "default")  # Default color if not specified in color_map
+         return f'background-color: {color}'
+
+     # Format for different columns
+     format_dict = {col: "{:.1f}" for col in df.columns if col not in ['Average', 'Model', 'Model Type']}
+     format_dict['Average'] = "{:.2f}"
+     format_dict[''] = "{:d}"
+
+     return df.style.applymap(apply_color, subset=['Model Type']).format(format_dict, na_rep='')
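+ # Example: a "DPO" cell is styled with 'background-color: #75809c'; numeric columns render with
+ # one decimal, "Average" with two, and the unnamed rank column as an integer.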
+
+ def regex_table(dataframe, regex, filter_button, style=True):
+     """
+     Takes a comma-separated list of model-name regexes and returns only the rows whose model name matches one of them.
+     """
+     # Split regex statement by comma and trim whitespace around regexes
+     regex_list = [x.strip() for x in regex.split(",")]
+     # Join the list into a single regex pattern with '|' acting as OR
+     combined_regex = '|'.join(regex_list)
+
+     # remove internal ai2 data
+     dataframe = dataframe[~dataframe["Model"].str.contains("ai2", case=False, na=False)]
+
+     # apply the model-type / prior-sets checkbox filters
+     update_scores = False
+     if isinstance(filter_button, list) or isinstance(filter_button, str):
+         if "Prior Sets" not in filter_button and 'Prior Sets (0.5 weight)' in dataframe.columns:
+             update_scores = True
+             # remove the column "Prior Sets (0.5 weight)" from the outputted table
+             dataframe = dataframe.drop(columns=['Prior Sets (0.5 weight)'])
+         if "Seq. Classifiers" not in filter_button:
+             dataframe = dataframe[~dataframe["Model Type"].str.contains("Seq. Classifier", case=False, na=False)]
+         if "DPO" not in filter_button:
+             dataframe = dataframe[~dataframe["Model Type"].str.contains("DPO", case=False, na=False)]
+         if "Custom Classifiers" not in filter_button:
+             dataframe = dataframe[~dataframe["Model Type"].str.contains("Custom Classifier", case=False, na=False)]
+         if "Generative" not in filter_button:
+             dataframe = dataframe[~dataframe["Model Type"].str.contains("generative", case=False, na=False)]
+     # Filter the dataframe such that 'Model' contains any of the regex patterns
+     data = dataframe[dataframe["Model"].str.contains(combined_regex, case=False, na=False)]
+
+     # if updating the score to not use prior sets, do so
+     if update_scores:
+         data["Score"] = (data["Chat"] + data["Chat Hard"] + data["Safety"] + data["Reasoning"]) / 4
+     # if "Prior Sets (0.5 weight)" in data.columns:
+     #     data["Prior Sets (0.5 weight)"] = np.nan
+     # sort array by Score column
+     data = data.sort_values(by='Score', ascending=False)
+
+     data.reset_index(drop=True, inplace=True)
+
+     # replace column '' with count/rank
+     data[''] = np.arange(1, 1 + len(data))
+
+     # if Score exists, round to 2 decimals
+     if "Score" in data.columns:
+         data["Score"] = np.round(np.array(data["Score"].values).astype(float), 2)
+     if "Average" in data.columns:
+         data["Average"] = np.round(np.array(data["Average"].values).astype(float), 1)
+     # round all others to 1 decimal
+     for col in data.columns:
+         if col not in ["", "Model", "Model Type", "Score", "Average"]:
+             # replace any data[col].values == '' with np.nan
+             data[col] = data[col].replace('', np.nan)
+             data[col] = np.round(np.array(data[col].values).astype(float), 1)
+     if style:
+         # apply color
+         data = color_model_type_column(data, color_map)
+
+     return data
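+ # Example: regex_table(df, "llama, qwen", ["Generative"]) keeps only Generative rows whose model
+ # name matches "llama" or "qwen" (case-insensitive), re-ranks them, and returns a styled table
+ # unless style=False is passed.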
+
+ # import ipdb; ipdb.set_trace()
+
+ total_models = len(regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"], style=False).values)
+
+ with gr.Blocks(css=custom_css) as app:
+     # create tabs for the app, moving the current table to one titled "rewardbench" and the benchmark_text to a tab called "About"
+     with gr.Row():
+         with gr.Column(scale=6):
+             gr.Markdown(TOP_TEXT.format(str(total_models)))
+         with gr.Column(scale=4):
+             # search = gr.Textbox(label="Model Search (delimit with , )", placeholder="Regex search for a model")
+             # filter_button = gr.Checkbox(label="Include AI2 training runs (or type ai2 above).", interactive=True)
+             # img = gr.Image(value="https://private-user-images.githubusercontent.com/10695622/310698241-24ed272a-0844-451f-b414-fde57478703e.png", width=500)
+             gr.Markdown("""
+ ![](file/src/logo.png)
+ """)
      with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏆 RewardBench Leaderboard"):
              with gr.Row():
+                 search_1 = gr.Textbox(label="Model Search (delimit with , )",
+                                       placeholder="Model Search (delimit with , )",
+                                       show_label=False)
+                 model_types_1 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "Prior Sets"],
+                                                  value=["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
+                                                  label="Model Types",
+                                                  show_label=False,
+                                                  # info="Which model types to include.",
+                                                  )
+             with gr.Row():
+                 # reference data
+                 rewardbench_table_hidden = gr.Dataframe(
+                     rewardbench_data_avg.values,
+                     datatype=col_types_rewardbench_avg,
+                     headers=rewardbench_data_avg.columns.tolist(),
+                     visible=False,
+                 )
+                 rewardbench_table = gr.Dataframe(
+                     regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"]),
+                     datatype=col_types_rewardbench_avg,
+                     headers=rewardbench_data_avg.columns.tolist(),
+                     elem_id="rewardbench_dataframe_avg",
+                     height=1000,
+                 )
+
+         with gr.TabItem("🔍 RewardBench - Detailed"):
+             with gr.Row():
+                 search_2 = gr.Textbox(label="Model Search (delimit with , )", show_label=False, placeholder="Model Search (delimit with , )")
+                 model_types_2 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
+                                                  value=["Seq. Classifiers", "DPO", "Generative", "Custom Classifiers"],
+                                                  label="Model Types",
+                                                  show_label=False,
+                                                  # info="Which model types to include."
+                                                  )
+             with gr.Row():
+                 # ref data
+                 rewardbench_table_detailed_hidden = gr.Dataframe(
+                     rewardbench_data.values,
+                     datatype=col_types_rewardbench,
+                     headers=rewardbench_data.columns.tolist(),
+                     visible=False,
+                 )
+                 rewardbench_table_detailed = gr.Dataframe(
+                     regex_table(rewardbench_data.copy(), "", ["Seq. Classifiers", "DPO", "Generative", "Custom Classifiers"]),
+                     datatype=col_types_rewardbench,
+                     headers=rewardbench_data.columns.tolist(),
+                     elem_id="rewardbench_dataframe",
+                     height=1000,
+                 )
+         # with gr.TabItem("rewardbench Eval Set - Length Bias"):
+         #     with gr.Row():
+         #         # backup
+         #         rewardbench_table_len_hidden = gr.Dataframe(
+         #             rewardbench_data_length.values,
+         #             datatype=cols_rewardbench_data_length,
+         #             headers=rewardbench_data_length.columns.tolist(),
+         #             visible=False,
+         #         )
+         #         rewardbench_table_len = gr.Dataframe(
+         #             regex_table(rewardbench_data_length.copy(), "", False).values,
+         #             datatype=cols_rewardbench_data_length,
+         #             headers=rewardbench_data_length.columns.tolist(),
+         #             elem_id="rewardbench_dataframe_length",
+         #             height=1000,
+         #         )
+         with gr.TabItem("Prior Test Sets"):
+             with gr.Row():
+                 search_3 = gr.Textbox(label="Model Search (delimit with , )", show_label=False, placeholder="Model Search (delimit with , )")
+                 model_types_3 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
+                                                  value=["Seq. Classifiers", "DPO", "Custom Classifiers"],
+                                                  label="Model Types",
+                                                  show_label=False,
+                                                  # info="Which model types to include.",
+                                                  )
+             with gr.Row():
+                 PREF_SET_TEXT = """
+ For more information, see the [dataset](https://huggingface.co/datasets/allenai/pref-test-sets). Only the subsets Anthropic Helpful, Anthropic HHH, Stanford SHP, and OpenAI's Summarize data are used in the leaderboard ranking.
+ """
+                 gr.Markdown(PREF_SET_TEXT)
+             with gr.Row():
+                 # backup
+                 pref_sets_table_hidden = gr.Dataframe(
+                     prefs_data.values,
+                     datatype=col_types_prefs,
+                     headers=prefs_data.columns.tolist(),
+                     visible=False,
+                 )
+                 pref_sets_table = gr.Dataframe(
+                     regex_table(prefs_data.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers"]),
+                     datatype=col_types_prefs,
+                     headers=prefs_data.columns.tolist(),
+                     elem_id="prefs_dataframe",
+                     height=1000,
+                 )

+
+         with gr.TabItem("About"):
              with gr.Row():
+                 gr.Markdown(ABOUT_TEXT)
+
+         with gr.TabItem("Dataset Viewer"):
+             with gr.Row():
+                 # loads one sample
+                 gr.Markdown("""## Random Dataset Sample Viewer
+ Warning, refusals, XSTest, and donotanswer datasets have sensitive content.""")
+                 subset_selector = gr.Dropdown(subsets, label="Subset", value=None, multiselect=True)
+                 button = gr.Button("Show Random Sample")
+
+             with gr.Row():
+                 sample_display = gr.Markdown("{sampled data loads here}")
+
+             button.click(fn=random_sample, inputs=[subset_selector], outputs=[sample_display])
+     # removed plot because not pretty enough
+     # with gr.TabItem("Model Correlation"):
+     #     with gr.Row():
+     #         plot = plot_avg_correlation(rewardbench_data_avg, prefs_data)
+     #         gr.Plot(plot)
+
+     search_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
+     search_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
+     # search.change(regex_table, inputs=[rewardbench_table_len_hidden, search, filter_button], outputs=rewardbench_table_len)
+     search_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)
+
+     model_types_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
+     model_types_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
+     model_types_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)
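+     # Each visible table is paired with a hidden, unfiltered copy (the *_hidden Dataframes);
+     # every search-box or checkbox change re-runs regex_table over that hidden copy, so filters
+     # are always applied to the full data rather than compounding on an already-filtered view.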

      with gr.Row():
+         with gr.Accordion("📚 Citation", open=False):
              citation_button = gr.Textbox(
+                 value=r"""@misc{RewardBench,
+     title={RewardBench: Evaluating Reward Models for Language Modeling},
+     author={Lambert, Nathan and Pyatkin, Valentina and Morrison, Jacob and Miranda, LJ and Lin, Bill Yuchen and Chandu, Khyathi and Dziri, Nouha and Kumar, Sachin and Zick, Tom and Choi, Yejin and Smith, Noah A. and Hajishirzi, Hannaneh},
+     year={2024},
+     howpublished={\url{https://huggingface.co/spaces/allenai/reward-bench}},
+ }""",
+                 lines=7,
+                 label="Copy the following to cite these results.",
                  elem_id="citation-button",
                  show_copy_button=True,
              )
+     # Load data when app starts, TODO make this used somewhere...
+     # def load_data_on_start():
+     #     data_rewardbench = load_all_data(repo_dir_rewardbench)
+     #     rewardbench_table.update(data_rewardbench)
+
+     #     data_rewardbench_avg = avg_over_rewardbench(repo_dir_rewardbench)
+     #     rewardbench_table.update(data_rewardbench_avg)
+
+     #     data_prefs = load_all_data(repo_dir_prefs)
+     #     pref_sets_table.update(data_prefs)

  scheduler = BackgroundScheduler()
+ scheduler.add_job(restart_space, "interval", seconds=10800)  # restarted every 3h
  scheduler.start()
+ app.launch(allowed_paths=['src/'])  # had .queue() before launch before... not sure if that's necessary