danschr commited on
Commit
80a2654
1 Parent(s): 6352644

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +54 -0
README.md CHANGED
@@ -4,3 +4,57 @@ pipeline_tag: text-classification
4
  widget:
5
  - text: "whaling is part of the culture of various indigenous population and should be allowed for the purpose of maintaining this tradition and way of life and sustenance, among other uses of a whale. against We should ban whaling"
6
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  widget:
5
  - text: "whaling is part of the culture of various indigenous population and should be allowed for the purpose of maintaining this tradition and way of life and sustenance, among other uses of a whale. against We should ban whaling"
6
  ---
7
+
8
+
9
+ ## Model Usage
10
+
11
+ ```python
12
+
13
+ import torch
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
14
+ LABEL_COLUMNS = ['Self-direction: thought',
15
+ 'Self-direction: action',
16
+ 'Stimulation',
17
+ 'Hedonism',
18
+ 'Achievement',
19
+ 'Power: dominance',
20
+ 'Power: resources',
21
+ 'Face',
22
+ 'Security: personal',
23
+ 'Security: societal',
24
+ 'Tradition',
25
+ 'Conformity: rules',
26
+ 'Conformity: interpersonal',
27
+ 'Humility',
28
+ 'Benevolence: caring',
29
+ 'Benevolence: dependability',
30
+ 'Universalism: concern',
31
+ 'Universalism: nature',
32
+ 'Universalism: tolerance',
33
+ 'Universalism: objectivity']
34
+
35
+ tokenizer = AutoTokenizer.from_pretrained("tum-nlp/Deberta_Human_Value_Detector")
36
+ model = AutoModelForSequenceClassification.from_pretrained("tum-nlp/Deberta_Human_Value_Detector", trust_remote_code=True)
37
+
38
+ example_text ='whaling is part of the culture of various indigenous population and should be allowed for the purpose of maintaining this tradition and way of life and sustenance, among other uses of a whale. against We should ban whaling'
39
+
40
+ encoding = tokenizer.encode_plus(
41
+     example_text,
42
+     add_special_tokens=True,
43
+     max_length=512,
44
+     return_token_type_ids=False,
45
+     padding="max_length",
46
+     return_attention_mask=True,
47
+     return_tensors='pt',
48
+ )
49
+
50
+ with torch.no_grad():
51
+     test_prediction = model(encoding["input_ids"], encoding["attention_mask"])
52
+     test_prediction = test_prediction["logits"].flatten().numpy()
53
+
54
+ THRESHOLD = 0.5  # decision threshold per label; adjust as needed
+ res = {}
+ print("Predictions:")
55
+ for label, prediction in zip(LABEL_COLUMNS, test_prediction):
56
+     if prediction < THRESHOLD:
57
+         continue
58
+     print(f"{label}: {prediction}")
59
+     res[label] = prediction
60
+ ```