Amirhoseinsh committed
Commit 3e8a2cc
1 Parent(s): 97d5130

Update app.py

Files changed (1):
  1. app.py +247 -0

app.py CHANGED
@@ -34,3 +34,250 @@ summ_eval_metrics = ['BLEU', 'CHARF', 'TER']
 qas_eval_metrics = ['F1', 'EXACT-MATCH']
 mts_eval_metrics = ['CHARF', 'BLEU', 'TER']
 mcq_eval_metrics = ['MC1', 'MC2']
+
+
+with tab1:
+    c, col1, cc, col2 = st.columns([.55, 2, .3, 2], gap="small")
+
+    with col1:
+        eval_tasks = st.radio("Select An Evaluation Task:",
+                              ('Text Summarization', 'Question Answering',
+                               'Machine Translation', 'Multiple Choice QNs'),
+                              horizontal=True)
+
+    with col2:
+        model_type = st.radio("Select A Model Type:",
+                              ('All', 'Quantized', 'Pretrained',
+                               'Fine-tuned', 'Instruction-tuned'),
+                              horizontal=True)
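+
+    # One branch per task below: each builds a small placeholder results
+    # table, applies the model-type filter chosen above, and renders it
+    # with clickable Hugging Face model links.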
+
+    if eval_tasks == 'Text Summarization':
+
+        select_eval_metrics = st.multiselect('Select Multiple Evaluation Metrics:', summ_eval_metrics, ['BLEU', 'CHARF', 'TER'])
+
+        st.markdown("<br>", unsafe_allow_html=True)
+
+        summ_eval_data = {'Type' : ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned'],
+                          'Model': ['username/model1', 'username/model2', 'username/model3', 'username/model4'],
+                          'BLEU' : [70, 60, 50, 40],
+                          'CHARF': [40, 50, 60, 70],
+                          'TER'  : [50, 70, 40, 60]}
+
+        llm__dataframe = pd.DataFrame(summ_eval_data)
+
+        # 'All' keeps every row; any other choice filters by model type.
+        if model_type in ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned']:
+            llm__dataframe = llm__dataframe.loc[llm__dataframe['Type'] == model_type]
+
+        selected_columns = ['Model', 'Type'] + select_eval_metrics
+        llm__dataframe = llm__dataframe[selected_columns]
+
+        # Turn repo IDs into full URLs so LinkColumn renders clickable links.
+        llm__dataframe['Model'] = llm__dataframe['Model'].apply(lambda x: f'https://huggingface.co/{x}')
+
+        st.checkbox("Use container width ▶️", value=True, key="use_container_width")
+
+        st.data_editor(llm__dataframe, column_config={"Model": st.column_config.LinkColumn("Model")},
+                       hide_index=True, use_container_width=st.session_state.use_container_width, key="data_editor")
+
+
+    elif eval_tasks == 'Question Answering':
+
+        select_eval_metrics = st.multiselect('Select Multiple Evaluation Metrics:', qas_eval_metrics, ['F1', 'EXACT-MATCH'])
+
+        st.markdown("<br>", unsafe_allow_html=True)
+
+        qas_eval_data = {'Type' : ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned'],
+                         'Model': ['username/model1', 'username/model2', 'username/model3', 'username/model4'],
+                         'F1'   : [70, 60, 50, 40],
+                         'EXACT-MATCH': [40, 50, 60, 70]}
+
+        llm__dataframe = pd.DataFrame(qas_eval_data)
+
+        if model_type in ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned']:
+            llm__dataframe = llm__dataframe.loc[llm__dataframe['Type'] == model_type]
+
+        selected_columns = ['Model', 'Type'] + select_eval_metrics
+        llm__dataframe = llm__dataframe[selected_columns]
+
+        llm__dataframe['Model'] = llm__dataframe['Model'].apply(lambda x: f'https://huggingface.co/{x}')
+
+        st.checkbox("Use container width ▶️", value=True, key="use_container_width")
+
+        st.data_editor(llm__dataframe, column_config={"Model": st.column_config.LinkColumn("Model")},
+                       hide_index=True, use_container_width=st.session_state.use_container_width, key="data_editor1")
+
+
+    elif eval_tasks == 'Machine Translation':
+
+        select_eval_metrics = st.multiselect('Select Multiple Evaluation Metrics:', mts_eval_metrics, ['BLEU', 'CHARF', 'TER'])
+
+        st.markdown("<br>", unsafe_allow_html=True)
+
+        mts_eval_data = {'Type' : ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned'],
+                         'Model': ['username/model1', 'username/model2', 'username/model3', 'username/model4'],
+                         'BLEU' : [70, 60, 50, 40],
+                         'CHARF': [40, 50, 60, 70],
+                         'TER'  : [50, 70, 40, 60]}
+
+        llm__dataframe = pd.DataFrame(mts_eval_data)
+
+        if model_type in ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned']:
+            llm__dataframe = llm__dataframe.loc[llm__dataframe['Type'] == model_type]
+
+        selected_columns = ['Model', 'Type'] + select_eval_metrics
+        llm__dataframe = llm__dataframe[selected_columns]
+
+        llm__dataframe['Model'] = llm__dataframe['Model'].apply(lambda x: f'https://huggingface.co/{x}')
+
+        st.checkbox("Use container width ▶️", value=True, key="use_container_width")
+
+        st.data_editor(llm__dataframe, column_config={"Model": st.column_config.LinkColumn("Model")},
+                       hide_index=True, use_container_width=st.session_state.use_container_width, key="data_editor2")
+
+
+    elif eval_tasks == 'Multiple Choice QNs':
+
+        select_eval_metrics = st.multiselect('Select Multiple Evaluation Metrics:', mcq_eval_metrics, ['MC1', 'MC2'])
+
+        st.markdown("<br>", unsafe_allow_html=True)
+
+        mcq_eval_data = {'Type' : ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned'],
+                         'Model': ['username/model1', 'username/model2', 'username/model3', 'username/model4'],
+                         'MC1'  : [70, 60, 50, 40],
+                         'MC2'  : [40, 50, 60, 70]}
+
+        llm__dataframe = pd.DataFrame(mcq_eval_data)
+
+        if model_type in ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned']:
+            llm__dataframe = llm__dataframe.loc[llm__dataframe['Type'] == model_type]
+
+        selected_columns = ['Model', 'Type'] + select_eval_metrics
+        llm__dataframe = llm__dataframe[selected_columns]
+
+        llm__dataframe['Model'] = llm__dataframe['Model'].apply(lambda x: f'https://huggingface.co/{x}')
+
+        st.checkbox("Use container width ▶️", value=True, key="use_container_width")
+
+        st.data_editor(llm__dataframe, column_config={"Model": st.column_config.LinkColumn("Model")},
+                       hide_index=True, use_container_width=st.session_state.use_container_width, key="data_editor3")
+
+
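+# Submission tab. get_model_name(), insert_model(), and fetch_all_models()
+# are storage helpers assumed to be defined earlier in app.py, outside this
+# diff hunk.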
+with tab2:
+
+    submitted_models = pd.DataFrame(columns=['Model Name', 'Model HF Name', 'Model Type', 'Model Precision', 'Evaluation Tasks'])
+
+    c, col1, col2, cc = st.columns([0.2, 1, 3, 0.2], gap="small")
+
+    with col1:
+        model_name = st.text_input("Enter Model Name (required):", placeholder="Enter model's short name", key="model_name")
+
+    with col2:
+        model_link = st.text_input("Enter Model HuggingFace Name:", placeholder="Enter model's HF Name: username/model", key="model_link")
+
+    c, col1, col2, col3, cc = st.columns([0.2, 1, 1, 2, 0.2], gap="small")
+
+    with col1:
+        model_type = ['Quantized', 'Pretrained', 'Fine-tuned', 'Instruction-tuned']
+        selected_model_type = st.selectbox('Select Model Type:', model_type)
+
+    with col2:
+        model_precision = ['float32', 'float16', 'bfloat16', '8bit (LLM.int8)', '4bit (QLoRA/FP4)']
+        selected_model_precision = st.selectbox('Select Model Precision:', model_precision)
+
+    with col3:
+        eval_tasks = ['All Tasks', 'Text Summarization', 'Question Answering', 'Machine Translation', 'Multiple Choice QNs']
+        selected_eval_tasks = st.selectbox('Select An Evaluation Task:', eval_tasks)
+
+
+    st.markdown("##")
+
+    c, col1, col2, cc = st.columns([2, 1, 1, 2], gap="small")
+
+    with col1:
+        def clear_text():
+            st.session_state["model_name"] = ""
+            st.session_state["model_link"] = ""
+
+        submit_button = st.button('Submit Model', key="submit")
+
+        # Insert the model only if no record with this name exists yet.
+        if submit_button and model_name != '' and model_link != '':
+            response = get_model_name(model_name)
+            if response is None:
+                model_name_exist = False
+                input_data = {'key': model_name, 'Model Name': model_name, 'Model HF Name': model_link, 'Model Type': selected_model_type,
+                              'Model Precision': selected_model_precision, 'Evaluation Tasks': selected_eval_tasks}
+                insert_model(input_data)
+                # DataFrame.append() was removed in pandas 2.0; use pd.concat instead.
+                submitted_models = pd.concat([submitted_models, pd.DataFrame(input_data, index=[0])], ignore_index=True)
+                submitted_models = submitted_models[['Model Name', 'Model HF Name', 'Model Type', 'Model Precision', 'Evaluation Tasks']]
+            else:
+                model_name_exist = True
+
+        elif submit_button and model_name != '' and model_link == '':
+            response = get_model_name(model_name)
+            if response is None:
+                model_name_exist = False
+                input_data = {'key': model_name, 'Model Name': model_name, 'Model HF Name': None, 'Model Type': selected_model_type,
+                              'Model Precision': selected_model_precision, 'Evaluation Tasks': selected_eval_tasks}
+                insert_model(input_data)
+                submitted_models = pd.concat([submitted_models, pd.DataFrame(input_data, index=[0])], ignore_index=True)
+                submitted_models = submitted_models[['Model Name', 'Model HF Name', 'Model Type', 'Model Precision', 'Evaluation Tasks']]
+            else:
+                model_name_exist = True
+
+
+    with col2:
+        st.button('Clear Form', on_click=clear_text)
+
+    st.markdown("##")
+
+    c, col1, col2 = st.columns([0.15, 3, 0.15], gap="small")
+
+    with col1:
+        # st.success/st.warning/st.error need a valid emoji icon; an empty
+        # string raises a StreamlitAPIException.
+        if submit_button and model_name != '' and model_link != '' and not model_name_exist:
+            st.success("You have submitted your model successfully", icon="✅")
+            st.data_editor(submitted_models, hide_index=True, use_container_width=st.session_state.use_container_width)
+
+        elif submit_button and model_name != '' and model_link == '' and not model_name_exist:
+            st.warning("You have submitted your model, but the model's HuggingFace name is missing", icon="⚠️")
+            st.data_editor(submitted_models, hide_index=True, use_container_width=st.session_state.use_container_width)
+
+        elif submit_button and model_name == '':
+            st.error("You have not submitted the required information: a model name is required", icon="🚨")
+
+        elif submit_button and model_name != '' and model_name_exist:
+            st.error("This model has already been submitted. Contact the admin for help: info@wishwork.org", icon="🚨")
+
+
+    st.markdown("##")
+
+    c, col1, col2 = st.columns([0.15, 3, 0.15], gap="small")
+
+    with col1:
+        with st.expander("Recently Submitted Models for Evaluation ⬇️"):
+            try:
+                all_submitted_models = pd.DataFrame(data=fetch_all_models())
+                all_submitted_models = all_submitted_models[['Model Name', 'Model HF Name', 'Model Type', 'Model Precision', 'Evaluation Tasks']]
+                st.data_editor(all_submitted_models, hide_index=True, use_container_width=st.session_state.use_container_width, key="data_editor4")
+            except KeyError:
+                # No rows returned, so the column selection above fails.
+                st.info('There are no submitted models for evaluation at this moment 😆', icon="ℹ️")
+
+
+footer = """<div class="footer"> <p class="p1">Copyright © 2023 <a style="text-align: center;" href="https://www.wishwork.org" target="_blank">Wish Work Inc.</a></p> </div>"""
+st.markdown(footer, unsafe_allow_html=True)
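The four leaderboard branches in tab1 differ only in their metric list, demo table, and widget keys. A minimal sketch of how they could be factored into one shared helper; the name `render_leaderboard`, the `task_key` argument, and the usage lines are illustrative and not part of this commit:

```python
import pandas as pd
import streamlit as st

def render_leaderboard(task_key, metrics, default_metrics, data, model_type):
    """Render one task's leaderboard: metric picker, type filter, HF links."""
    selected = st.multiselect('Select Multiple Evaluation Metrics:',
                              metrics, default_metrics, key=f"{task_key}_metrics")
    df = pd.DataFrame(data)
    if model_type != 'All':                      # 'All' keeps every row
        df = df.loc[df['Type'] == model_type]
    df = df[['Model', 'Type'] + selected]
    df['Model'] = df['Model'].apply(lambda x: f'https://huggingface.co/{x}')
    st.data_editor(df, column_config={"Model": st.column_config.LinkColumn("Model")},
                   hide_index=True, use_container_width=True, key=f"{task_key}_editor")

# Hypothetical usage, mirroring the Text Summarization branch:
# render_leaderboard('summ', summ_eval_metrics, ['BLEU', 'CHARF', 'TER'],
#                    summ_eval_data, model_type)
```

Keying each widget on `task_key` keeps every branch's multiselect and data editor state independent, which the per-branch `key="data_editor…"` arguments in the diff do by hand.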