"""Gradio app for the M-RewardBench leaderboard.

Loads the main and translation result sheets from Google Sheets, colour-codes
the scores, and renders them with gradio_leaderboard.
"""

from functools import partial

import gradio as gr
import numpy as np
import pandas as pd
from gradio_leaderboard import Leaderboard, SelectColumns, ColumnFilter

# Define constants and enums
# TITLE = "<h1 align='center'>M-RewardBench Leaderboard</h1>"
# NOTE: plain <h1> markup is assumed for the page title; the original inline HTML
# was not preserved in this copy.
TITLE = '''<h1 align="center">M-RewardBench: Evaluating Reward Models in Multilingual Settings</h1>'''

INTRODUCTION_TEXT = '''
Evaluating the chat, safety, reasoning, and translation capabilities of Multilingual Reward Models.

📄 [Paper](https://arxiv.org/pdf/2410.15522.pdf) | 💻 [Code](https://github.com/for-ai/m-rewardbench) | 🤗 [Dataset](https://hf.co/datasets/C4AI-Community/multilingual-reward-bench) | 📚 [arXiv](https://arxiv.org/abs/2410.15522) | 🏆 [Leaderboard](https://c4ai-community-m-rewardbench.hf.space/)

🌐 https://m-rewardbench.github.io/'''

# GOOGLE_SHEET_URL = "https://docs.google.com/spreadsheets/d/1qrD7plUdrBwAw7G6UeDVZAaV9ihxaNAcoiKwSaqotR4/export?gid=0&format=csv"
GOOGLE_SHEET_URLS = [
    "https://docs.google.com/spreadsheets/d/1qrD7plUdrBwAw7G6UeDVZAaV9ihxaNAcoiKwSaqotR4/gviz/tq?tqx=out:csv&sheet=gt",
    "https://docs.google.com/spreadsheets/d/1qrD7plUdrBwAw7G6UeDVZAaV9ihxaNAcoiKwSaqotR4/gviz/tq?tqx=out:csv&sheet=maple",
]

# ABOUT_TEXT = """
# # M-RewardBench: Evaluating Reward Models in Multilingual Settings
# Welcome to M-RewardBench Leaderboard!"""


class AutoEvalColumn:
    model = {
        "name": "Model",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }
    model_type = {
        "name": "MT",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }

    @classmethod
    def add_columns_from_df(cls, df, columns):
        for col in columns:
            if col.lower() != 'model':  # Skip if it's the model column since it's predefined
                setattr(cls, col, {
                    "name": col,
                    "type": "markdown",
                    "displayed_by_default": True,
                    "never_hidden": False,
                })


class AutoEvalColumnTranslation:
    model = {
        "name": "Model",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }
    model_type = {
        "name": "MT",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }

    @classmethod
    def add_columns_from_df(cls, df, columns):
        for col in columns:
            if col.lower() != 'model':  # Skip if it's the model column since it's predefined
                setattr(cls, col, {
                    "name": col,
                    "type": "markdown",
                    "displayed_by_default": True,
                    "never_hidden": False,
                })


def get_result_data():
    return pd.read_csv(GOOGLE_SHEET_URLS[0])


def get_translation_data():
    return pd.read_csv(GOOGLE_SHEET_URLS[1])


def init_leaderboard(dataframe, df_class):
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
    return Leaderboard(
        value=dataframe,
        datatype=[
            col["type"] for col in df_class.__dict__.values() if isinstance(col, dict)
        ],
        select_columns=SelectColumns(
            default_selection=[
                col["name"]
                for col in df_class.__dict__.values()
                if isinstance(col, dict) and col["displayed_by_default"]
            ],
            cant_deselect=[
                col["name"]
                for col in df_class.__dict__.values()
                if isinstance(col, dict) and col.get("never_hidden", False)
            ],
            label="Select Columns to Display:",
        ),
        search_columns=["Model"],
        interactive=False,
    )


def format_model_link(row):
    """Format model name as HTML link if URL is available"""
    model_name = row["Model"]
    # url = row.get("URL", "")
    # if pd.notna(url) and url.strip():
    #     return f'{model_name}'
    return model_name


lang_ids = "eng_Latn arb_Arab tur_Latn rus_Cyrl ces_Latn pol_Latn kor_Hang zho_Hans zho_Hant fra_Latn ell_Grek deu_Latn ron_Latn ita_Latn nld_Latn pes_Arab hin_Deva ukr_Cyrl por_Latn ind_Latn jpn_Jpan spa_Latn heb_Hebr vie_Latn"
emojis = "🔢 💬 🎯"
model_types = {"Generative RM": "💬", "DPO": "🎯", "Sequence Classifier": "🔢"}


def format_with_color(val, min_val=50, max_val=100, scale=True):
    """
    Formats a value with inline green color gradient CSS.
    Returns an HTML string with bold, black text and muted green background.
    """
    try:
        val = float(val)
        if pd.isna(val):
            return str(val)

        # Normalize value between 50 and 100 to 0-1 range
        normalized = (val - min_val) / (max_val - min_val)
        # print(normalized)

        # Clamp value between 0 and 1
        normalized = max(0, min(1, normalized))

        # Create color gradient with reduced brightness (max 200 instead of 255)
        # and increased minimum intensity (50 instead of 0)
        intensity = int(50 + (150 * (1 - normalized)))

        # Return HTML with inline CSS - bold black text
        show_val = val
        if scale:
            show_val = val * 100
        # NOTE: the exact inline markup is assumed; the original tags were stripped
        # from this copy of the file.
        return (
            f'<div style="background-color: rgb({intensity}, 255, {intensity}); '
            f'color: black; font-weight: bold;">{show_val:.1f}</div>'
        )
    except (ValueError, TypeError):
        return str(val)


demo = gr.Blocks(theme=gr.themes.Soft())
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT)

    with gr.Tabs() as tabs:
        with gr.TabItem("🏅 Main"):
            df = get_result_data()
            df["Model_Type"] = df["Model_Type"].map(model_types)
            df["Model"] = df.apply(format_model_link, axis=1)
            df["zho"] = df[["zho_Hans", "zho_Hant"]].mean(axis=1)

            # Split on any whitespace: the language ids above are space-separated.
            columns = lang_ids.split()
            # print(df.head())
            df.pop("zho_Hans")
            df.pop("zho_Hant")

            df.rename(columns={
                "Model_Type": "MT",
                "Avg_Multilingual": "AVG",
            }, inplace=True)
            df.rename(columns={col: col[:3] for col in columns}, inplace=True)
            # df = df.style.applymap(apply_color_gradient, subset=['eng'])

            numeric_cols = df.select_dtypes(include=[np.number]).columns
            global_min = df.select_dtypes(include='number').min().min().astype(float)
            global_max = df.select_dtypes(include='number').max().max().astype(float)
            for col in numeric_cols:
                lang_format_with_color = partial(
                    format_with_color,
                    # min_val=df[col].min(),
                    # max_val=df[col].max(),
                    min_val=global_min,
                    max_val=global_max,
                )
                df[col] = df[col].apply(lang_format_with_color)
            # for col in numeric_cols:
            #     df[col] = (df[col] * 100).round(1).astype(str)

            AutoEvalColumn.add_columns_from_df(df, numeric_cols)
            leaderboard = init_leaderboard(df, AutoEvalColumn)

        with gr.TabItem("🏅 Translation"):
            df = get_translation_data()
            df["Model_Type"] = df["Model_Type"].map(model_types)
            df["Model"] = df.apply(format_model_link, axis=1)
            df.rename(columns={
                "Model_Type": "MT",
                "Avg": "AVG",
            }, inplace=True)

            numeric_cols = df.select_dtypes(include=[np.number]).columns
            # print(df[numeric_cols].min().min())
            # print(df[numeric_cols].max().max())
            global_min = df.select_dtypes(include='number').min().min().astype(float)
            global_max = df.select_dtypes(include='number').max().max().astype(float)
            # print(global_max)
            for col in numeric_cols:
                # print(df[col].min())
                lang_format_with_color = partial(
                    format_with_color,
                    min_val=global_min,
                    max_val=global_max,
                    # min_val=df[col].min(),
                    # max_val=df[col].max(),
                    scale=False,
                )
                df[col] = df[col].apply(lang_format_with_color)
            # for col in numeric_cols:
            #     df[col] = (df[col] * 100).round(1).astype(str)

            AutoEvalColumnTranslation.add_columns_from_df(df, numeric_cols)
            leaderboard = init_leaderboard(df, AutoEvalColumnTranslation)

        # Add statistics tab with suitable emoji and title
        with gr.TabItem("📊 Statistics"):
            gr.Markdown('''## Dataset Statistics

| Category | # Instances | # Languages |
|------------------------------|-------------|-------------|
| **General-purpose capabilities** | | |
| Chat | 296 | 23 |
| Chat-Hard | 407 | 23 |
| Safety | 736 | 23 |
| Reasoning | 1,430 | 23 |
| **Multilingual knowledge** | | |
| Translation | 400 | 2 |
| **Total** | 66,787 | - |''')
            # gr.Markdown("### Model Statistics")

    with gr.Row():
        with gr.Accordion("📚 Citation", open=False):
            citation_button = gr.Textbox(
                value=r"""@misc{gureja2024mrewardbench,
    title={M-RewardBench: Evaluating Reward Models in Multilingual Settings},
    author={Srishti Gureja and Lester James V. Miranda and Shayekh Bin Islam and Rishabh Maheshwary and Drishti Sharma and Gusti Winata and Nathan Lambert and Sebastian Ruder and Sara Hooker and Marzieh Fadaee},
    year={2024},
    eprint={2410.15522},
    archivePrefix={arXiv},
    primaryClass={cs.CL},
    url={https://arxiv.org/abs/2410.15522},
}""",
                lines=7,
                label="BibTeX",
                elem_id="citation-button",
                show_copy_button=True,
            )

demo.launch(ssr_mode=False)