khulnasoft committed
Commit 9fc38f9 • 1 Parent(s): 28ca423

Create app.py

Files changed (1)
  1. app.py +258 -0
app.py ADDED
@@ -0,0 +1,258 @@
+ # some code blocks are taken from https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/tree/main
+ import json
+ import os
+ from datetime import datetime, timezone
+
+ import gradio as gr
+ import pandas as pd
+ from huggingface_hub import HfApi
+
+ from src.css_html import custom_css
+ from src.text_content import ABOUT_TEXT, SUBMISSION_TEXT_3
+ from src.utils import (
+     AutoEvalColumn,
+     fields,
+     is_model_on_hub,
+     make_clickable_names,
+     plot_throughput,
+     styled_error,
+     styled_message,
+ )
+
+ TOKEN = os.environ.get("HF_TOKEN", None)
+ api = HfApi(token=TOKEN)
+ df = pd.read_csv("data/code_eval_board.csv")
+
+ QUEUE_REPO = "deepcode-ai/evaluation-requests"
+ EVAL_REQUESTS_PATH = "eval-queue"
+ COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+ TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
+ COLS_LITE = [
+     c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden
+ ]
+ TYPES_LITE = [
+     c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden
+ ]
+
+
+ def add_new_eval(
+     model: str,
+     revision: str,
+     precision: str,
+     model_type: str,
+ ):
+     """Write an eval request file for `model` and push it to the evaluation-requests dataset."""
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+     if model_type is None or model_type == "":
+         return styled_error("Please select a model type.")
+
+     # check the model actually exists before adding the eval
+     if revision == "":
+         revision = "main"
+
+     model_on_hub, error = is_model_on_hub(model, revision)
+     if not model_on_hub:
+         return styled_error(f'Model "{model}" {error}')
+
+     print("adding new eval")
+
+     eval_entry = {
+         "model": model,
+         "revision": revision,
+         "precision": precision,
+         "status": "PENDING",
+         "submitted_time": current_time,
+         "model_type": model_type.split(" ")[1],
+     }
+
+     user_name = ""
+     model_path = model
+     if "/" in model:
+         user_name = model.split("/")[0]
+         model_path = model.split("/")[1]
+
+     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+     os.makedirs(OUT_DIR, exist_ok=True)
+     out_path = f"{OUT_DIR}/{model_path}_eval_request_{precision}.json"
+     print(f"Saving eval request to {out_path}")
+
+     with open(out_path, "w") as f:
+         f.write(json.dumps(eval_entry))
+
+     api.upload_file(
+         path_or_fileobj=out_path,
+         path_in_repo=out_path.split("eval-queue/")[1],
+         repo_id=QUEUE_REPO,
+         repo_type="dataset",
+         commit_message=f"Add {model} to eval queue",
+     )
+
+     # remove the local file
+     os.remove(out_path)
+
+     return styled_message("Your request has been submitted to the evaluation queue!\n")
+
+
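For illustration only (this example is not part of the commit): a hypothetical call such as add_new_eval("myorg/mymodel", "main", "float16", "🟢 base") would write the request file locally, upload it to the deepcode-ai/evaluation-requests dataset, and then delete the local copy. The uploaded file would be shaped like this (all values illustrative):

    # eval-queue/myorg/mymodel_eval_request_float16.json
    {
        "model": "myorg/mymodel",
        "revision": "main",
        "precision": "float16",
        "status": "PENDING",
        "submitted_time": "...",
        "model_type": "base"
    }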
+ def select_columns(df, columns):
+     always_here_cols = [
+         AutoEvalColumn.model_type_symbol.name,
+         AutoEvalColumn.model.name,
+     ]
+     # We use COLS to maintain sorting
+     filtered_df = df[
+         always_here_cols + [c for c in COLS if c in df.columns and c in columns]
+     ]
+     return filtered_df
+
+
+ def filter_items(df, leaderboard_table, query):
+     if query == "all":
+         return df[leaderboard_table.columns]
+     else:
+         query = query[0]
+     filtered_df = df[df["T"].str.contains(query, na=False)]
+     return filtered_df[leaderboard_table.columns]
+
+
+ def search_table(df, leaderboard_table, query):
+     filtered_df = df[(df["Model"].str.contains(query, case=False))]
+     return filtered_df[leaderboard_table.columns]
+
+
+ df = make_clickable_names(df)
+
+ # <div style='background-color: #F5F1CB; text-align: center; padding: 10px;'>
+ #     <p><b>Warning</b>: This leaderboard is not regularly updated with the latest instruction-tuned code models; check the <b>Submit Results</b> section for submitting new evaluation results.
+ #     You can also check other code leaderboards like <a href="https://evalplus.github.io/leaderboard.html">EvalPlus</a> & <a href="https://huggingface.co/spaces/mike-ravkine/can-ai-code-results">Can-AI-Code</a>.</p>
+ # </div>
+ demo = gr.Blocks(css=custom_css)
+ with demo:
+     with gr.Row():
+         gr.Markdown(
+             """<div style="text-align: center;"><h1> ⭐ Deep <span style='color: #e6b800;'>Code</span> Models <span style='color: #e6b800;'>Leaderboard</span></h1></div>\
+             <br>\
+             <p>Inspired by the <a href="https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard">🤗 Open LLM Leaderboard</a> and <a href="https://huggingface.co/spaces/optimum/llm-perf-leaderboard">🤗 Open LLM-Perf Leaderboard 🏋️</a>, we compare the performance of base multilingual code generation models on the <a href="https://huggingface.co/datasets/openai_humaneval">HumanEval</a> benchmark and <a href="https://huggingface.co/datasets/nuprl/MultiPL-E">MultiPL-E</a>. We also measure throughput and provide\
+             information about the models. We only compare open pre-trained multilingual code models that people can use as base models for their own training.</p>
+             """,
+             elem_classes="markdown-text",
+         )
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.Column():
+             with gr.Tabs(elem_classes="A100-tabs") as A100_tabs:
+                 with gr.TabItem("🔍 Evaluation table", id=0):
+                     with gr.Column():
+                         with gr.Accordion("➡️ See All Columns", open=False):
+                             shown_columns = gr.CheckboxGroup(
+                                 choices=[
+                                     c
+                                     for c in COLS
+                                     if c
+                                     not in [
+                                         AutoEvalColumn.dummy.name,
+                                         AutoEvalColumn.model.name,
+                                         AutoEvalColumn.model_type_symbol.name,
+                                     ]
+                                 ],
+                                 value=[
+                                     c
+                                     for c in COLS_LITE
+                                     if c
+                                     not in [
+                                         AutoEvalColumn.dummy.name,
+                                         AutoEvalColumn.model.name,
+                                         AutoEvalColumn.model_type_symbol.name,
+                                     ]
+                                 ],
+                                 label="",
+                                 elem_id="column-select",
+                                 interactive=True,
+                             )
+                         # with gr.Column(min_width=780):
+                         with gr.Row():
+                             search_bar = gr.Textbox(
+                                 placeholder="🔍 Search for your model and press ENTER...",
+                                 show_label=False,
+                                 elem_id="search-bar",
+                             )
+                             filter_columns = gr.Radio(
+                                 label="⏚ Filter model types",
+                                 choices=["all", "🟢 base", "🔶 instruction-tuned", "EXT external-evaluation"],
+                                 value="all",
+                                 elem_id="filter-columns",
+                             )
+
+                         leaderboard_df = gr.components.Dataframe(
+                             value=df[
+                                 [
+                                     AutoEvalColumn.model_type_symbol.name,
+                                     AutoEvalColumn.model.name,
+                                 ]
+                                 + shown_columns.value
+                             ],
+                             headers=[
+                                 AutoEvalColumn.model_type_symbol.name,
+                                 AutoEvalColumn.model.name,
+                             ]
+                             + shown_columns.value,
+                             datatype=TYPES,
+                             elem_id="leaderboard-table",
+                             interactive=False,
+                         )
+
+                         # Hidden, unfiltered copy of the table used as the source for the callbacks below.
+                         hidden_leaderboard_df = gr.components.Dataframe(
+                             value=df,
+                             headers=COLS,
+                             datatype=["str" for _ in range(len(COLS))],
+                             visible=False,
+                         )
+                         search_bar.submit(
+                             search_table,
+                             [hidden_leaderboard_df, leaderboard_df, search_bar],
+                             leaderboard_df,
+                         )
+                         filter_columns.change(
+                             filter_items,
+                             [hidden_leaderboard_df, leaderboard_df, filter_columns],
+                             leaderboard_df,
+                         )
+                         shown_columns.change(
+                             select_columns,
+                             [hidden_leaderboard_df, shown_columns],
+                             leaderboard_df,
+                         )
+                         gr.Markdown(
+                             """
+                             **Notes:**
+                             - Win Rate represents how often a model outperforms the other models in each language, averaged across all languages.
+                             - The scores of instruction-tuned models might be significantly higher on humaneval-python than on other languages: we use the instruction format for HumanEval and base MultiPL-E prompts for the other languages.
+                             - For more details check the 📝 About section.
+                             - Models with a 🔴 symbol represent external evaluation submissions: we did not verify the results ourselves, and the author's submission can be found under the `Submission PR` field in the `See All Columns` tab.
+                             """,
+                             elem_classes="markdown-text",
+                         )
+
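Aside, not part of the commit: the Win Rate column mentioned in the notes is read pre-computed from data/code_eval_board.csv rather than calculated in this file. A minimal sketch consistent with the stated definition, assuming a hypothetical `scores` DataFrame indexed by model with one score column per language, could look like:

    import pandas as pd

    def win_rate(scores: pd.DataFrame) -> pd.Series:
        # Rank models within each language column, convert the rank into the
        # percentage of other models beaten, then average across languages.
        ranks = scores.rank(axis=0, method="average")
        beaten = (ranks - 1) / (len(scores) - 1) * 100
        return beaten.mean(axis=1)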
+                 with gr.TabItem("📊 Performance Plot", id=1):
+                     with gr.Row():
+                         bs_1_plot = gr.components.Plot(
+                             value=plot_throughput(df, bs=1),
+                             elem_id="bs1-plot",
+                             show_label=False,
+                         )
+                         bs_50_plt = gr.components.Plot(
+                             value=plot_throughput(df, bs=50),
+                             elem_id="bs50-plot",
+                             show_label=False,
+                         )
+                     gr.Markdown(
+                         "**Note:** The throughputs for some models are missing and might appear as zero.",
+                         elem_classes="markdown-text",
+                     )
+                 with gr.TabItem("📝 About", id=2):
+                     gr.Markdown(ABOUT_TEXT, elem_classes="markdown-text")
+                 with gr.TabItem("Submit results 🚀", id=3):
+                     gr.Markdown(SUBMISSION_TEXT_3)
+
+
+ demo.launch()
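The helpers imported from src.utils (AutoEvalColumn, fields, is_model_on_hub, make_clickable_names, plot_throughput, styled_error, styled_message) are not included in this commit. As a rough, assumption-laden sketch, is_model_on_hub in the Open LLM Leaderboard code this file credits follows approximately the pattern below; the actual src/utils.py may differ:

    # Sketch only: mirrors the upstream Open LLM Leaderboard pattern, not the repo's actual helper.
    from typing import Tuple

    from transformers import AutoConfig


    def is_model_on_hub(model_name: str, revision: str) -> Tuple[bool, str]:
        """Return (True, "") if the repo/revision resolves on the Hub, else (False, reason)."""
        try:
            # Loading the config is enough to confirm the model and revision exist.
            AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=False)
            return True, ""
        except Exception as e:
            return False, f"was not found on the Hub: {e}"

With that return convention, add_new_eval's f'Model "{model}" {error}' produces a readable message such as: Model "myorg/mymodel" was not found on the Hub: ... (model name illustrative).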