Update app.py

app.py CHANGED
@@ -1,68 +1,72 @@
  import gradio as gr
  import pandas as pd
  from apscheduler.schedulers.background import BackgroundScheduler
  # Removed Hugging Face Hub imports as they are not needed for the simplified leaderboard

  # --- Make sure these imports work relative to your file structure ---
- # Option 1: If src is a directory in the same folder as your script:
  try:
      from src.about import (
          CITATION_BUTTON_LABEL,
          CITATION_BUTTON_TEXT,
          EVALUATION_QUEUE_TEXT, # Keep if used by commented-out submit tab
          INTRODUCTION_TEXT,
          LLM_BENCHMARKS_TEXT,
-         TITLE,
      )
-
      from src.envs import REPO_ID # Keep if needed for restart_space or other functions
      from src.submission.submit import add_new_eval # Keep if using the submit tab
      print("Successfully imported from src module.")
- # Option 2:
  except ImportError:
      print("Warning: Using placeholder values because src module imports failed.")
      CITATION_BUTTON_LABEL="Citation"
      CITATION_BUTTON_TEXT="Please cite us if you use this benchmark..."
      EVALUATION_QUEUE_TEXT="Current evaluation queue:"
-
-
-
-
      REPO_ID="your/space-id" # Replace with actual ID if needed
      def add_new_eval(*args): return "Submission placeholder."
  # --- End Placeholder Definitions ---


  # --- Elo Leaderboard Configuration ---
- #
- # !!! IMPORTANT: Replace placeholder URLs with actual model/project pages. !!!
- # Verify organizer and license information for accuracy.
  data = [
-     {'model_name': 'gpt-4o-mini', 'url': 'https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 753, 'Tabular_Elo': 839, 'NLP_Elo': 758, 'CV_Elo': 754, 'Overall': 778},
-     {'model_name': 'gpt-4o', 'url': 'https://openai.com/index/hello-gpt-4o/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 830, 'Tabular_Elo': 861, 'NLP_Elo': 903, 'CV_Elo': 761, 'Overall': 841},
-     {'model_name': 'o3-mini', 'url': 'https://openai.com/index/openai-o3-mini/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 1108, 'Tabular_Elo': 1019, 'NLP_Elo': 1056, 'CV_Elo': 1207, 'Overall': 1096}, # Fill details later
-     {'model_name': 'deepseek-v3', 'url': 'https://api-docs.deepseek.com/news/news1226', 'organizer': 'DeepSeek', 'license': 'DeepSeek', 'MLE-Lite_Elo': 1004, 'Tabular_Elo': 1015, 'NLP_Elo': 1028, 'CV_Elo': 1067, 'Overall': 1023},
-     {'model_name': 'deepseek-r1', 'url': 'https://api-docs.deepseek.com/news/news250120', 'organizer': 'DeepSeek', 'license': 'DeepSeek', 'MLE-Lite_Elo': 1137, 'Tabular_Elo': 1053, 'NLP_Elo': 1103, 'CV_Elo': 1083, 'Overall': 1100},
-     {'model_name': 'gemini-2.0-flash', 'url': 'https://ai.google.dev/gemini-api/docs/models#gemini-2.0-flash', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 847, 'Tabular_Elo': 923, 'NLP_Elo': 860, 'CV_Elo': 978, 'Overall': 895},
-     {'model_name': 'gemini-2.0-pro', 'url': 'https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 1064, 'Tabular_Elo': 1139, 'NLP_Elo': 1028, 'CV_Elo': 973, 'Overall': 1054},
-     {'model_name': 'gemini-2.5-pro', 'url': 'https://deepmind.google/technologies/gemini/pro/', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 1257, 'Tabular_Elo': 1150, 'NLP_Elo': 1266, 'CV_Elo': 1177, 'Overall': 1214},
  ]
-
- # Create a master DataFrame
- # Note: Columns 'organizer' and 'license' are created in lowercase here.
  master_df = pd.DataFrame(data)
-
-
- CATEGORIES = ["Overall", "MLE-Lite", "Tabular", "NLP", "CV"] # Overall first
- DEFAULT_CATEGORY = "Overall" # Set a default category
-
- # Map user-facing categories to DataFrame column names
  category_to_column = {
-     "MLE-Lite": "MLE-Lite_Elo",
-     "Tabular": "Tabular_Elo",
-     "NLP": "NLP_Elo",
-     "CV": "CV_Elo",
-     "Overall": "Overall"
  }

  # --- Helper function to update leaderboard ---
@@ -75,143 +79,246 @@ def update_leaderboard(category):
      if score_column is None or score_column not in master_df.columns:
          print(f"Warning: Invalid category '{category}' or column '{score_column}'. Falling back to default.")
          score_column = category_to_column[DEFAULT_CATEGORY]
-         # Check fallback column too
          if score_column not in master_df.columns:
-             # Return empty df with correct columns if still invalid
-             # Use lowercase keys here consistent with master_df for the empty case
              print(f"Error: Default column '{score_column}' also not found.")
              return pd.DataFrame({
-                 "Rank": [],
-                 "Model": [],
-                 "Elo Score": [],
-                 "Organizer": [], # Changed 'organizer' -> 'Organizer' for consistency in empty case
-                 "License": [] # Changed 'license' -> 'License' for consistency in empty case
              })

-     # Select base columns + the score column for sorting
-     # Ensure 'organizer' and 'license' are selected correctly (lowercase)
      cols_to_select = ['model_name', 'url', 'organizer', 'license', score_column]
      df = master_df[cols_to_select].copy()
-
-     # Sort by the selected 'Elo Score' descending
      df.sort_values(by=score_column, ascending=False, inplace=True)
-
-     # Add Rank based on the sorted order
      df.reset_index(drop=True, inplace=True)
      df.insert(0, 'Rank', df.index + 1)

-     # Format Model Name as HTML Hyperlink
-     # The resulting column name will be 'Model' (capitalized)
      df['Model'] = df.apply(
-         lambda row: f"<a href='{row['url'] if pd.notna(row['url']) else '#'}' target='_blank'>{row['model_name']}</a>",
          axis=1
      )

-     # Rename score column
-     df.rename(columns={score_column: 'Elo Score'}, inplace=True)
-
-     # Rename 'organizer' and 'license' to match desired display headers
-     df.rename(columns={'organizer': 'Organizer', 'license': 'License'}, inplace=True)
-
-     # Select and reorder columns for final display using the ACTUAL column names in df
-     # Use capitalized 'Organizer' and 'License' here because they have been renamed.
      final_columns = ["Rank", "Model", "Organizer", "License", "Elo Score"]
      df = df[final_columns]
-
-     # Note: The DataFrame returned now has columns:
-     # 'Rank', 'Model', 'Organizer', 'License', 'Elo Score'
      return df

  # --- Mock/Placeholder functions/data for other tabs ---
- # (If the Submit tab is used, ensure these variables are appropriately populated or handled)
  print("Warning: Evaluation queue data fetching is disabled/mocked due to leaderboard changes.")
  finished_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
  running_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
  pending_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
- EVAL_COLS = ["Model", "Status", "Requested", "Started"]
- EVAL_TYPES = ["str", "str", "str", "str"]

  # --- Keep restart function if relevant ---
  def restart_space():
-     # Make sure REPO_ID is correctly defined/imported if this function is used
      print(f"Attempting to restart space: {REPO_ID}")
-     # Replace with your actual space restart mechanism if needed
-

- # ---

-
-
- # Adjust the '1.2em' value (e.g., to '1.4em', '16px') to change the size.
- # The !important flag helps override theme defaults.
- # If the imported custom_css already has content, append to it.
- font_size_css = """
  body {
-     font-size: 1.2em !important;
  }
-
- /*
-
-
-
  }
-
-
-
-
- */
-
- #
-
-
-
-
-
-
-
-
-
-
-
-
  """

- #
-

  with demo:
-     #
      gr.HTML(TITLE)

-     #
-

      with gr.Tabs(elem_classes="tab-buttons") as tabs:
          with gr.TabItem("🏅 MLE-Dojo Benchmark", elem_id="llm-benchmark-tab-table", id=0):
              with gr.Column():
-
                  category_selector = gr.Radio(
                      choices=CATEGORIES,
-                     label="Select Category:",
                      value=DEFAULT_CATEGORY,
                      interactive=True,
                  )
                  leaderboard_df_component = gr.Dataframe(
-                     # Initialize with sorted data for the default category
                      value=update_leaderboard(DEFAULT_CATEGORY),
-                     # Headers for DISPLAY should match the *renamed* columns from update_leaderboard
                      headers=["Rank", "Model", "Organizer", "License", "Elo Score"],
-                     # Datatype maps to the final df columns: Rank, Model, Organizer, License, Elo Score
                      datatype=["number", "html", "str", "str", "number"],
                      interactive=False,
-
-                     # row_count determines the number of rows to display
-                     row_count=(len(master_df), "fixed"), # Display all rows
                      col_count=(5, "fixed"),
-                     wrap=True,
-                     elem_id="leaderboard-table" #
                  )
-                 # Link the radio button change to the update function
                  category_selector.change(
                      fn=update_leaderboard,
                      inputs=category_selector,
@@ -219,87 +326,41 @@ with demo:
                  )

          with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-about", id=1):
-             #
-             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

-     # --- Submit Tab (
-     # Make sure EVALUATION_QUEUE_TEXT and add_new_eval are imported/defined if uncommented
      # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-submit", id=2):
-     #
-     #
-
-     #     with gr.Column():
-     #         with gr.Accordion(f"✅ Finished Evaluations ({len(finished_eval_queue_df)})", open=False):
-     #             finished_eval_table = gr.components.Dataframe(
-     #                 value=finished_eval_queue_df, headers=EVAL_COLS, datatype=EVAL_TYPES, row_count=5,
-     #             )
-     #         with gr.Accordion(f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})", open=False):
-     #             running_eval_table = gr.components.Dataframe(
-     #                 value=running_eval_queue_df, headers=EVAL_COLS, datatype=EVAL_TYPES, row_count=5,
-     #             )
-     #         with gr.Accordion(f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})", open=False):
-     #             pending_eval_table = gr.components.Dataframe(
-     #                 value=pending_eval_queue_df, headers=EVAL_COLS, datatype=EVAL_TYPES, row_count=5,
-     #             )
-     #     with gr.Row():
-     #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-     #     with gr.Row():
-     #         with gr.Column():
-     #             model_name_textbox = gr.Textbox(label="Model name (on Hugging Face Hub)")
-     #             revision_name_textbox = gr.Textbox(label="Revision / Commit Hash", placeholder="main")
-     #             model_type = gr.Dropdown(choices=["Type A", "Type B", "Type C"], label="Model type", multiselect=False, value=None, interactive=True) # Example choices
-     #         with gr.Column():
-     #             precision = gr.Dropdown(choices=["float16", "bfloat16", "float32", "int8", "auto"], label="Precision", multiselect=False, value="auto", interactive=True)
-     #             weight_type = gr.Dropdown(choices=["Original", "Adapter", "Delta"], label="Weights type", multiselect=False, value="Original", interactive=True)
-     #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-     #             submit_button = gr.Button("Submit Eval")
-     #             submission_result = gr.Markdown()
-     #             # Ensure add_new_eval is correctly imported/defined and handles these inputs
-     #             submit_button.click(
-     #                 add_new_eval, # Requires import/definition
-     #                 [ model_name_textbox, base_model_name_textbox, revision_name_textbox, precision, weight_type, model_type, ],
-     #                 submission_result,
-     #             )

      # --- Citation Row (at the bottom, outside Tabs) ---
-     with gr.Accordion("📙 Citation", open=False):
-         # Use the CITATION_BUTTON_TEXT and CITATION_BUTTON_LABEL variables imported or defined above
          citation_button = gr.Textbox(
              value=CITATION_BUTTON_TEXT,
              label=CITATION_BUTTON_LABEL,
-             lines=
-             elem_id="citation-button",
              show_copy_button=True,
          )

-     #
-     # content_copy download
-     # Use code with caution.
-     # IGNORE_WHEN_COPYING_END
-
- # --- Keep scheduler if relevant ---
- # Only start scheduler if the script is run directly
  if __name__ == "__main__":
      try:
          scheduler = BackgroundScheduler()
-         # Add job only if restart_space is callable (i.e., not a placeholder or failed import)
          if callable(restart_space):
-
-
-
-
-
-
          else:
-
      except Exception as e:
          print(f"Failed to initialize or start scheduler: {e}")

-
- # --- Launch the app ---
- # Ensures the app launches only when the script is run directly
- if __name__ == "__main__":
-     # Ensure you have installed necessary libraries: pip install gradio pandas apscheduler
-     # Make sure your src module files (about.py etc.) are accessible OR use the placeholder definitions above.
      print("Launching Gradio App...")
      demo.launch()
app.py (updated):

+ # -*- coding: utf-8 -*-
  import gradio as gr
  import pandas as pd
  from apscheduler.schedulers.background import BackgroundScheduler
  # Removed Hugging Face Hub imports as they are not needed for the simplified leaderboard

  # --- Make sure these imports work relative to your file structure ---
  try:
+     # Assume these contain the *content* without excessive inline styling
      from src.about import (
          CITATION_BUTTON_LABEL,
          CITATION_BUTTON_TEXT,
          EVALUATION_QUEUE_TEXT, # Keep if used by commented-out submit tab
          INTRODUCTION_TEXT,
          LLM_BENCHMARKS_TEXT,
+         TITLE, # Expected to have an ID like #main-leaderboard-title
      )
+     # Import custom_css if it exists, otherwise it will be defined below
+     try:
+         from src.display.css_html_js import custom_css
+     except ImportError:
+         print("Warning: src.display.css_html_js not found. Starting with empty custom_css.")
+         custom_css = "" # Start fresh if not found
+
      from src.envs import REPO_ID # Keep if needed for restart_space or other functions
      from src.submission.submit import add_new_eval # Keep if using the submit tab
      print("Successfully imported from src module.")
+ # Option 2: Placeholder definitions (REMOVE IF USING OPTION 1)
  except ImportError:
      print("Warning: Using placeholder values because src module imports failed.")
      CITATION_BUTTON_LABEL="Citation"
      CITATION_BUTTON_TEXT="Please cite us if you use this benchmark..."
      EVALUATION_QUEUE_TEXT="Current evaluation queue:"
+     # Example placeholders with structure for CSS
+     TITLE="""<h1 id="main-leaderboard-title" align="center">🏅 MLE-Dojo Benchmark Leaderboard (Placeholder)</h1>"""
+     INTRODUCTION_TEXT="""
+     <div class="introduction-section">
+         <p>Welcome to the MLE-Dojo Benchmark Leaderboard (Placeholder Content).</p>
+         <p>Edit <code>src/about.py</code> to set your actual title and introduction text.</p>
+     </div>
+     """
+     LLM_BENCHMARKS_TEXT="""
+     ## About Section (Placeholder)
+     Information about the benchmarks will go here. Edit <code>src/about.py</code>.
+     """
+     custom_css="" # Start with empty CSS
      REPO_ID="your/space-id" # Replace with actual ID if needed
      def add_new_eval(*args): return "Submission placeholder."
  # --- End Placeholder Definitions ---


  # --- Elo Leaderboard Configuration ---
+ # (Keep your data definition as is)
  data = [
+     {'model_name': 'gpt-4o-mini', 'url': 'https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 753, 'Tabular_Elo': 839, 'NLP_Elo': 758, 'CV_Elo': 754, 'Overall': 778},
+     {'model_name': 'gpt-4o', 'url': 'https://openai.com/index/hello-gpt-4o/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 830, 'Tabular_Elo': 861, 'NLP_Elo': 903, 'CV_Elo': 761, 'Overall': 841},
+     {'model_name': 'o3-mini', 'url': 'https://openai.com/index/openai-o3-mini/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 1108, 'Tabular_Elo': 1019, 'NLP_Elo': 1056, 'CV_Elo': 1207, 'Overall': 1096}, # Fill details later
+     {'model_name': 'deepseek-v3', 'url': 'https://api-docs.deepseek.com/news/news1226', 'organizer': 'DeepSeek', 'license': 'DeepSeek', 'MLE-Lite_Elo': 1004, 'Tabular_Elo': 1015, 'NLP_Elo': 1028, 'CV_Elo': 1067, 'Overall': 1023},
+     {'model_name': 'deepseek-r1', 'url': 'https://api-docs.deepseek.com/news/news250120', 'organizer': 'DeepSeek', 'license': 'DeepSeek', 'MLE-Lite_Elo': 1137, 'Tabular_Elo': 1053, 'NLP_Elo': 1103, 'CV_Elo': 1083, 'Overall': 1100},
+     {'model_name': 'gemini-2.0-flash', 'url': 'https://ai.google.dev/gemini-api/docs/models#gemini-2.0-flash', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 847, 'Tabular_Elo': 923, 'NLP_Elo': 860, 'CV_Elo': 978, 'Overall': 895},
+     {'model_name': 'gemini-2.0-pro', 'url': 'https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 1064, 'Tabular_Elo': 1139, 'NLP_Elo': 1028, 'CV_Elo': 973, 'Overall': 1054},
+     {'model_name': 'gemini-2.5-pro', 'url': 'https://deepmind.google/technologies/gemini/pro/', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 1257, 'Tabular_Elo': 1150, 'NLP_Elo': 1266, 'CV_Elo': 1177, 'Overall': 1214},
  ]
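+ # (Note: 'Overall' is entered as its own column in the data above; it is not
+ #  computed from the four per-category Elo columns anywhere in this file.)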
  master_df = pd.DataFrame(data)
+ CATEGORIES = ["Overall", "MLE-Lite", "Tabular", "NLP", "CV"]
+ DEFAULT_CATEGORY = "Overall"
  category_to_column = {
+     "MLE-Lite": "MLE-Lite_Elo", "Tabular": "Tabular_Elo",
+     "NLP": "NLP_Elo", "CV": "CV_Elo", "Overall": "Overall"
  }
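+ # (category_to_column maps each radio choice to its Elo column in master_df,
+ #  e.g. category_to_column["CV"] -> "CV_Elo"; update_leaderboard sorts on that column.)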

  # --- Helper function to update leaderboard ---
  ... (lines 73-78 unchanged: def update_leaderboard(category): ...)
      if score_column is None or score_column not in master_df.columns:
          print(f"Warning: Invalid category '{category}' or column '{score_column}'. Falling back to default.")
          score_column = category_to_column[DEFAULT_CATEGORY]
          if score_column not in master_df.columns:
              print(f"Error: Default column '{score_column}' also not found.")
+             # Return empty df with desired display columns
              return pd.DataFrame({
+                 "Rank": [], "Model": [], "Organizer": [], "License": [], "Elo Score": []
              })

      cols_to_select = ['model_name', 'url', 'organizer', 'license', score_column]
      df = master_df[cols_to_select].copy()
      df.sort_values(by=score_column, ascending=False, inplace=True)
      df.reset_index(drop=True, inplace=True)
      df.insert(0, 'Rank', df.index + 1)

+     # Format Model Name as HTML Hyperlink - use a CSS class for styling
      df['Model'] = df.apply(
+         lambda row: f"<a href='{row['url'] if pd.notna(row['url']) else '#'}' target='_blank' class='model-link'>{row['model_name']}</a>",
          axis=1
      )
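+     # (These anchor tags render as real hyperlinks only because the gr.Dataframe
+     #  below declares datatype "html" for the Model column.)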

+     # Rename columns for final display
+     df.rename(columns={score_column: 'Elo Score', 'organizer': 'Organizer', 'license': 'License'}, inplace=True)
      final_columns = ["Rank", "Model", "Organizer", "License", "Elo Score"]
      df = df[final_columns]
      return df
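+ # (Illustrative sanity check: update_leaderboard("NLP") should come back sorted by
+ #  NLP_Elo descending, with Rank 1 = gemini-2.5-pro at 1266.)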

  # --- Mock/Placeholder functions/data for other tabs ---
  print("Warning: Evaluation queue data fetching is disabled/mocked due to leaderboard changes.")
  finished_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
  running_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
  pending_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
+ EVAL_COLS = ["Model", "Status", "Requested", "Started"]
+ EVAL_TYPES = ["str", "str", "str", "str"]

  # --- Keep restart function if relevant ---
  def restart_space():
      print(f"Attempting to restart space: {REPO_ID}")
+     # Replace with your actual space restart mechanism if needed

+ # --- Enhanced CSS Definition ---
+ # Define all styles here. Assumes TITLE has id="main-leaderboard-title"
+ # and INTRODUCTION_TEXT is wrapped in class="introduction-section" (or rendered by gr.Markdown).

+ enhanced_css = """
+ /* Base and Theme Overrides */
  body {
+     font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
+     font-size: 16px; /* Base font size */
+     line-height: 1.6;
+     background-color: #f8f9fa; /* Light background */
+     color: #343a40; /* Default text color */
  }
+
+ /* Container adjustments for better spacing */
+ .gradio-container {
+     max-width: 1200px !important; /* Limit max width */
+     margin: 0 auto !important; /* Center the container */
+     padding: 2rem !important; /* Add padding around the whole app */
  }
+
+ /* --- Title Styling --- */
+ /* Targets the h1 tag with the specific ID from src/about.py */
+ #main-leaderboard-title {
+     font-size: 2.8em; /* Large title */
+     font-weight: 700; /* Bolder */
+     color: #212529; /* Darker color for title */
+     text-align: center; /* Ensure centering */
+     margin-bottom: 1.5rem; /* Space below title */
+     padding-bottom: 0.5rem; /* Space within the element */
+     border-bottom: 2px solid #dee2e6; /* Subtle underline */
+ }
+
+ /* --- Introduction Text Styling --- */
+ /* Targets the wrapper div or the markdown component */
+ .introduction-section p, .introduction-wrapper .prose p { /* Target paragraphs within the section */
+     font-size: 1.15em; /* Slightly larger than base */
+     color: #495057; /* Slightly lighter text color */
+     margin-bottom: 1rem; /* Space between paragraphs */
+     max-width: 900px; /* Limit width for readability */
+     margin-left: auto; /* Center the text block */
+     margin-right: auto; /* Center the text block */
+     text-align: center; /* Center align intro text */
+ }
+ .introduction-section, .introduction-wrapper {
+     margin-bottom: 2.5rem; /* Space below the intro block */
+ }
+
+ /* --- General Markdown and Header Styling --- */
+ .markdown-text h2, .tabitem .prose h2 { /* Target section headers */
+     font-size: 1.8em;
+     font-weight: 600;
+     color: #343a40;
+     margin-top: 2.5rem; /* More space above sections */
+     margin-bottom: 1.2rem;
+     padding-bottom: 0.4rem;
+     border-bottom: 1px solid #e9ecef;
+ }
+ .markdown-text p, .tabitem .prose p {
+     font-size: 1em; /* Standard paragraph size */
+     margin-bottom: 1rem;
+     color: #495057;
+ }
+ .markdown-text a, .tabitem .prose a { /* Style links within markdown */
+     color: #007bff;
+     text-decoration: none;
+ }
+ .markdown-text a:hover, .tabitem .prose a:hover {
+     text-decoration: underline;
+ }
+
+ /* --- Tab Styling --- */
+ .tab-buttons button { /* Style tab buttons */
+     font-size: 1.1em !important;
+     padding: 10px 20px !important;
+     font-weight: 500;
+ }
+
+ /* --- Leaderboard Table Styling --- */
+ #leaderboard-table {
+     margin-top: 1.5rem; /* Space above table */
+     font-size: 1em; /* Ensure table font size is consistent */
+     border: 1px solid #dee2e6;
+     box-shadow: 0 2px 4px rgba(0,0,0,0.05); /* Subtle shadow */
+ }
+ #leaderboard-table th {
+     background-color: #e9ecef; /* Header background */
+     font-weight: 600; /* Header font weight */
+     padding: 12px 15px; /* Header padding */
+     text-align: left;
+     color: #495057;
+     white-space: nowrap; /* Prevent header text wrapping */
+ }
+ #leaderboard-table td {
+     padding: 12px 15px; /* Cell padding */
+     border-bottom: 1px solid #e9ecef; /* Horizontal lines */
+     vertical-align: middle; /* Center cell content vertically */
+ }
+ #leaderboard-table tr:nth-child(even) td {
+     background-color: #f8f9fa; /* Zebra striping */
+ }
+ #leaderboard-table tr:hover td {
+     background-color: #e2e6ea; /* Hover effect */
+ }
+ /* Style for the model links within the table */
+ #leaderboard-table .model-link {
+     color: #0056b3; /* Slightly darker blue for links */
+     font-weight: 500;
+     text-decoration: none;
+ }
+ #leaderboard-table .model-link:hover {
+     text-decoration: underline;
+     color: #003d80;
+ }
+
+ /* --- Radio Button / Category Selector Styling --- */
+ .gradio-radio label span { /* Target the label text */
+     font-size: 1.1em !important;
+     font-weight: 500;
+     color: #343a40;
+ }
+ .gradio-radio fieldset { /* Adjust spacing around radio buttons */
+     margin-top: 0.5rem;
+     margin-bottom: 1.5rem;
+ }
+ .gradio-radio fieldset label { /* Style individual radio choices */
+     padding: 8px 12px !important;
+ }
+
+ /* --- Accordion Styling --- */
+ .gradio-accordion > button { /* Accordion header */
+     font-size: 1.2em !important;
+     font-weight: 600;
+     padding: 12px 15px !important;
+     background-color: #f1f3f5 !important;
+     border-bottom: 1px solid #dee2e6 !important;
+ }
+ .gradio-accordion > div { /* Accordion content area */
+     padding: 15px !important;
+     border: 1px solid #dee2e6 !important;
+     border-top: none !important;
+ }
+
+ /* --- Textbox/Button Styling (e.g., Citation) --- */
+ #citation-button textarea {
+     font-family: 'Courier New', Courier, monospace; /* Monospace for code/citation */
+     font-size: 0.95em !important;
+     background-color: #e9ecef;
+     color: #343a40;
+ }
+ #citation-button label span {
+     font-weight: 600;
+ }
+
  """

+ # Combine any existing CSS with the new enhanced CSS
+ # Prioritize enhanced_css rules by placing it last or using more specific selectors
+ final_css = custom_css + "\n" + enhanced_css

+ # --- Gradio App Definition ---
+ # Use a theme for base styling and apply custom CSS overrides
+ demo = gr.Blocks(css=final_css, theme=gr.themes.Soft(
+     # Optional: Customize theme variables if needed
+     # primary_hue=gr.themes.colors.blue,
+     # secondary_hue=gr.themes.colors.gray,
+     # neutral_hue=gr.themes.colors.cool_gray,
+ ))

  with demo:
+     # Render TITLE from src/about.py (expects <h1 id="main-leaderboard-title">...)
      gr.HTML(TITLE)

+     # Render INTRODUCTION_TEXT from src/about.py
+     # Add a wrapper class for CSS targeting if the text itself doesn't have one
+     with gr.Row():
+         gr.Markdown(INTRODUCTION_TEXT, elem_classes="introduction-wrapper") # Use this class for CSS

      with gr.Tabs(elem_classes="tab-buttons") as tabs:
          with gr.TabItem("🏅 MLE-Dojo Benchmark", elem_id="llm-benchmark-tab-table", id=0):
              with gr.Column():
+                 # Use standard Markdown for the section header, CSS will style it
+                 gr.Markdown("## Model Elo Rankings by Category", elem_classes="markdown-text")
                  category_selector = gr.Radio(
                      choices=CATEGORIES,
+                     label="Select Category:", # Label is styled via CSS
                      value=DEFAULT_CATEGORY,
                      interactive=True,
+                     elem_classes="gradio-radio" # Add class for styling
                  )
                  leaderboard_df_component = gr.Dataframe(
                      value=update_leaderboard(DEFAULT_CATEGORY),
                      headers=["Rank", "Model", "Organizer", "License", "Elo Score"],
                      datatype=["number", "html", "str", "str", "number"],
                      interactive=False,
+                     row_count=(len(master_df), "fixed"),
                      col_count=(5, "fixed"),
+                     wrap=True,
+                     elem_id="leaderboard-table" # Used for specific table CSS
                  )
                  category_selector.change(
                      fn=update_leaderboard,
                      inputs=category_selector,
                      ... (line 325 unchanged)
                  )

          with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-about", id=1):
+             # Render LLM_BENCHMARKS_TEXT using Markdown, styled by CSS
+             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text") # Apply standard markdown styling

+     # --- Submit Tab (Keep commented out or uncomment and ensure imports/variables are defined) ---
      # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-submit", id=2):
+     #     # ... (Your submission form code - apply elem_classes="markdown-text" to gr.Markdown) ...
+     #     pass # Placeholder
+

      # --- Citation Row (at the bottom, outside Tabs) ---
+     with gr.Accordion("📙 Citation", open=False, elem_classes="gradio-accordion"): # Add class
          citation_button = gr.Textbox(
              value=CITATION_BUTTON_TEXT,
              label=CITATION_BUTTON_LABEL,
+             lines=8, # Adjusted lines slightly
+             elem_id="citation-button", # Used for specific CSS
              show_copy_button=True,
          )

+ # --- Scheduler and Launch ---
  if __name__ == "__main__":
      try:
          scheduler = BackgroundScheduler()
          if callable(restart_space):
+             if REPO_ID and REPO_ID != "your/space-id":
+                 scheduler.add_job(restart_space, "interval", seconds=1800)
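+                 # (an interval of 1800 seconds = one restart attempt every 30 minutes)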
+                 scheduler.start()
+                 print("Scheduler started for space restart.")
+             else:
+                 print("Warning: REPO_ID not set or is placeholder; space restart job not scheduled.")
          else:
+             print("Warning: restart_space function not available; space restart job not scheduled.")
      except Exception as e:
          print(f"Failed to initialize or start scheduler: {e}")

      print("Launching Gradio App...")
+     # demo.queue() # Consider adding queue() for better handling under load
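+     # (Illustrative sketch: demo.queue(max_size=20) before launch() caps the number of
+     #  pending events; max_size is a standard gr.Blocks.queue() parameter.)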
      demo.launch()