JRQi committed
Commit ef27278
1 Parent(s): ea2cadc

Update game3.py

Files changed (1)
  1. game3.py +24 -82
game3.py CHANGED
@@ -40,59 +40,28 @@ def func3(num_selected, human_predict, num1, num2, user_important):
 
     golden_label = (text['label']^1) * 100
 
-
-    '''
-    # (START) API version -- quick
-
-    API_URL = "https://api-inference.huggingface.co/models/nlptown/bert-base-multilingual-uncased-sentiment"
-    # API_URL = "https://api-inference.huggingface.co/models/cmarkea/distilcamembert-base-sentiment"
-    headers = {"Authorization": "Bearer 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"}
-
-    response = requests.post(API_URL, headers=headers, json=text['text'])
-    output = response.json()
-
-    # result = dict()
-    star2num = {
-        "5 stars": 100,
-        "4 stars": 75,
-        "3 stars": 50,
-        "2 stars": 25,
-        "1 star": 0,
-    }
-
-    print(output)
-    out = output[0][0]
-    # (END) API version
-    '''
-
     # (START) off-the-shelf version -- slow at the beginning
     # Load model directly
-    from transformers import AutoTokenizer, AutoModelForSequenceClassification
-
-    tokenizer = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
-    model = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
-
     # Use a pipeline as a high-level helper
-    from transformers import pipeline
 
-    classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
+    classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification")
     output = classifier([text['text']])
 
-    star2num = {
-        "5 stars": 100,
-        "4 stars": 75,
-        "3 stars": 50,
-        "2 stars": 25,
-        "1 star": 0,
-    }
+    # star2num = {
+    #     "5 stars": 100,
+    #     "4 stars": 75,
+    #     "3 stars": 50,
+    #     "2 stars": 25,
+    #     "1 star": 0,
+    # }
     print(output)
     out = output[0]
 
     # (END) off-the-shelf version
 
-    ai_predict = star2num[out['label']]
-    # result[label] = out['score']
-
+    # ai_predict = out['label']
+    ai_predict = out['score']
+
     user_select = "You focused on "
     flag_select = False
     if user_important == "":
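
For reference, the new off-the-shelf path in func3 reduces to the following minimal standalone sketch (not part of the diff), assuming the transformers package is installed; the input string and the printed scores are illustrative:

# Minimal sketch of the new classification path in func3 (illustrative values throughout).
from transformers import pipeline

# Model name is the one introduced by this commit; the input text is made up.
classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification")

output = classifier(["Alex went to the store yesterday."])
print(output)               # e.g. [{'label': 'Male', 'score': 0.98}] -- illustrative output

out = output[0]
ai_predict = out['score']   # the commit keeps the raw confidence instead of mapping star labels to 0-100
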
@@ -251,52 +220,29 @@ def func3_written(text_written, human_predict, lang_written):
     chatbot = []
     # num1: Human score; num2: AI score
 
-    '''
-    # (START) API version
-
-    API_URL = "https://api-inference.huggingface.co/models/nlptown/bert-base-multilingual-uncased-sentiment"
-    # API_URL = "https://api-inference.huggingface.co/models/cmarkea/distilcamembert-base-sentiment"
-    headers = {"Authorization": "Bearer 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"}
-
-    response = requests.post(API_URL, headers=headers, json=text_written)
-    output = response.json()
-
-    # result = dict()
-    star2num = {
-        "5 stars": 100,
-        "4 stars": 75,
-        "3 stars": 50,
-        "2 stars": 25,
-        "1 star": 0,
-    }
-
-    out = output[0][0]
-    # (END) API version
-    '''
-
     # (START) off-the-shelf version
 
     # tokenizer = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
     # model = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
 
-    classifier = pipeline("sentiment-analysis", model="nlptown/bert-base-multilingual-uncased-sentiment")
-
+    classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification")
+
     output = classifier([text_written])
 
-    star2num = {
-        "5 stars": 100,
-        "4 stars": 75,
-        "3 stars": 50,
-        "2 stars": 25,
-        "1 star": 0,
-    }
+    # star2num = {
+    #     "5 stars": 100,
+    #     "4 stars": 75,
+    #     "3 stars": 50,
+    #     "2 stars": 25,
+    #     "1 star": 0,
+    # }
     print(output)
     out = output[0]
     # (END) off-the-shelf version
 
 
-    ai_predict = star2num[out['label']]
-    # result[label] = out['score']
+    # ai_predict = star2num[out['label']]
+    ai_predict = out['score']
 
     if abs(ai_predict - human_predict) <= 12.5:
         chatbot.append(("AI gives it a close score! 🎉", "⬅️ Feel free to try another one! ⬅️"))
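
The same swap lands in func3_written; a short sketch (not part of the diff) of how the score now feeds the comparison at the end of the hunk above, with an illustrative human_predict value and input text:

# Sketch of how ai_predict feeds the threshold check in func3_written (illustrative values).
from transformers import pipeline

classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification")

chatbot = []
human_predict = 75                        # illustrative value
text_written = "Sample written input."    # illustrative value

out = classifier([text_written])[0]
ai_predict = out['score']                 # a probability in [0, 1], unlike the old 0-100 star mapping

if abs(ai_predict - human_predict) <= 12.5:
    chatbot.append(("AI gives it a close score! 🎉", "⬅️ Feel free to try another one! ⬅️"))
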
@@ -309,13 +255,9 @@ def func3_written(text_written, human_predict, lang_written):
 
     import shap
 
-    # sentiment_classifier = pipeline("text-classification", return_all_scores=True)
-    if lang_written == "Dutch":
-        sentiment_classifier = pipeline("text-classification", model='DTAI-KULeuven/robbert-v2-dutch-sentiment', return_all_scores=True)
-    else:
-        sentiment_classifier = pipeline("text-classification", model='distilbert-base-uncased-finetuned-sst-2-english', return_all_scores=True)
+    gender_classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification", return_all_scores=True)
 
-    explainer = shap.Explainer(sentiment_classifier)
+    explainer = shap.Explainer(gender_classifier)
 
     shap_values = explainer([text_written])
     interpretation = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
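
The SHAP block now explains the same gender model regardless of lang_written; a self-contained sketch (not part of the diff) of the explainer usage, with an illustrative input:

# Sketch of the SHAP usage introduced here, end to end (input text and values are illustrative).
import shap
from transformers import pipeline

gender_classifier = pipeline(
    "text-classification",
    model="padmajabfrl/Gender-Classification",
    return_all_scores=True,    # scores for all classes, so shap_values gets a per-class dimension
)

explainer = shap.Explainer(gender_classifier)
shap_values = explainer(["Sample written input."])

# Pair each token with its attribution toward class index 1, as the zip() above does.
interpretation = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
print(interpretation)          # e.g. [('Sample ', 0.01), ('written ', -0.02), ...]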
 