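"""Gradio app for the SeaExam leaderboard Space.

It downloads the latest evaluation results from the Hugging Face Hub,
renders them as tabbed leaderboard tables, and periodically restarts the
Space so the tables stay in sync with the results repo.
"""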
import os

import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.display.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.envs import API
from src.leaderboard.load_results import load_data

# Clone / pull the SeaExam evaluation results from the Hugging Face Hub.
TOKEN = os.environ.get("TOKEN", None)
RESULTS_REPO = "SeaLLMs/SeaExam-results"
CACHE_PATH = os.getenv("HF_HOME", ".")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
print(EVAL_RESULTS_PATH)
snapshot_download(
    repo_id=RESULTS_REPO,
    local_dir=EVAL_RESULTS_PATH,
    repo_type="dataset",
    token=TOKEN,
)
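
# With TOKEN set, snapshot_download can also read a private results repo;
# the same token is reused below to restart the Space.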


def restart_space():
    API.restart_space(repo_id="SeaLLMs/SeaExam_leaderboard", token=TOKEN)
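
# A restart re-runs this script from the top, so the results dataset is
# re-downloaded and newly pushed scores appear without a manual redeploy.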

# Load the leaderboard tables from the downloaded CSV file.
csv_path = f"{EVAL_RESULTS_PATH}/SeaExam_results.csv"
df_m3exam, df_mmlu, df_avg = load_data(csv_path)
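# `load_data` (defined in src/leaderboard/load_results.py, not shown here)
# parses the results CSV into three DataFrames: per-model M3Exam scores,
# MMLU scores, and the overall average used for the first tab.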

demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    # gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("π Overall", elem_id="llm-benchmark-Sum", id=0):
            leaderboard_table = gr.components.Dataframe(
                value=df_avg,
                interactive=False,
                visible=True,
            )
        with gr.TabItem("π M3Exam", elem_id="llm-benchmark-M3Exam", id=1):
            leaderboard_table = gr.components.Dataframe(
                value=df_m3exam,
                interactive=False,
                visible=True,
            )
        with gr.TabItem("π MMLU", elem_id="llm-benchmark-MMLU", id=2):
            leaderboard_table = gr.components.Dataframe(
                value=df_mmlu,
                interactive=False,
                visible=True,
            )
        with gr.TabItem("π About", elem_id="llm-benchmark-tab-table", id=3):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

# Start the scheduler before launching: launch() blocks the main thread, so
# the stray demo.launch() that used to sit here kept the scheduler from ever
# starting. Restart the Space every 30 minutes to pull in fresh results.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

demo.queue(default_concurrency_limit=40).launch(share=True)
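# `default_concurrency_limit=40` lets up to 40 queued events run concurrently;
# `share=True` is ignored on Hugging Face Spaces, which are already public.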