# Sentiment Binary Classification (fine-tuning with KoELECTRA-Small-v3 model and Naver Sentiment Movie Corpus dataset)

## Usage (Amazon SageMaker inference applicable)
The inference script below implements the SageMaker Inference Toolkit handler interface (`model_fn`, `input_fn`, `predict_fn`, `output_fn`) as is, so the model can be deployed to a SageMaker endpoint without modification.
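Deployment itself is not covered by this card, but a minimal sketch with the SageMaker Python SDK might look like the following; the S3 path, IAM role, instance type, and framework versions are placeholders you would adjust to your own setup.

```python
# Hedged deployment sketch: bucket, role, versions, and instance type are assumptions.
import sagemaker
from sagemaker.pytorch import PyTorchModel

role = sagemaker.get_execution_role()  # assumes a SageMaker execution context

pytorch_model = PyTorchModel(
    model_data="s3://<your-bucket>/model/model.tar.gz",  # packaged model artifacts
    role=role,
    entry_point="inference_nsmc.py",  # the handler script shown below
    framework_version="1.8.1",        # assumed PyTorch container version
    py_version="py3",
)

predictor = pytorch_model.deploy(
    initial_instance_count=1,
    instance_type="ml.m5.xlarge",
)
```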

### inference_nsmc.py

```python
import json
import sys
import logging
import torch
from torch import nn
from transformers import ElectraConfig
from transformers import ElectraModel, AutoTokenizer, ElectraTokenizer, ElectraForSequenceClassification

logging.basicConfig(
    level=logging.INFO,
    format='[{%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(filename='tmp.log'),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

max_seq_length = 128
classes = ['Neg', 'Pos']

tokenizer = AutoTokenizer.from_pretrained("daekeun-ml/koelectra-small-v3-nsmc")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def model_fn(model_path=None):
    ####
    # If you have your own trained model, load it from model_path instead.
    # Hugging Face pre-trained model: 'monologg/koelectra-small-v3-discriminator'
    ####
    # config = ElectraConfig.from_json_file(f'{model_path}/config.json')
    # model = ElectraForSequenceClassification.from_pretrained(f'{model_path}/model.pth', config=config)

    # Download the fine-tuned model from the Hugging Face Hub
    model = ElectraForSequenceClassification.from_pretrained('daekeun-ml/koelectra-small-v3-nsmc')
    model.to(device)
    return model


def input_fn(input_data, content_type="application/jsonlines"):
    # Deserialize the request body: one JSON object per line, e.g. {"text": ["..."]}
    data_str = input_data.decode("utf-8")
    jsonlines = data_str.split("\n")
    transformed_inputs = []

    for jsonline in jsonlines:
        if not jsonline.strip():  # skip empty lines such as a trailing newline
            continue
        text = json.loads(jsonline)["text"][0]
        logger.info("input text: {}".format(text))
        encode_plus_token = tokenizer.encode_plus(
            text,
            max_length=max_seq_length,
            add_special_tokens=True,
            return_token_type_ids=False,
            padding="max_length",
            return_attention_mask=True,
            return_tensors="pt",
            truncation=True,
        )
        transformed_inputs.append(encode_plus_token)

    return transformed_inputs


def predict_fn(transformed_inputs, model):
    predicted_classes = []

    for data in transformed_inputs:
        data = data.to(device)
        output = model(**data)

        # Convert logits to class probabilities and pick the argmax
        softmax_fn = nn.Softmax(dim=1)
        softmax_output = softmax_fn(output[0])
        _, prediction = torch.max(softmax_output, dim=1)

        predicted_class_idx = prediction.item()
        predicted_class = classes[predicted_class_idx]
        score = softmax_output[0][predicted_class_idx]
        logger.info("predicted_class: {}".format(predicted_class))

        prediction_dict = {}
        prediction_dict["predicted_label"] = predicted_class
        prediction_dict["score"] = score.cpu().detach().numpy().tolist()

        jsonline = json.dumps(prediction_dict)
        logger.info("jsonline: {}".format(jsonline))
        predicted_classes.append(jsonline)

    predicted_classes_jsonlines = "\n".join(predicted_classes)
    return predicted_classes_jsonlines


def output_fn(outputs, accept="application/jsonlines"):
    return outputs, accept
```

### test.py
```python
>>> from inference_nsmc import model_fn, input_fn, predict_fn, output_fn
>>> with open('samples/nsmc.txt', mode='rb') as file:
...     model_input_data = file.read()
>>> model = model_fn()
>>> transformed_inputs = input_fn(model_input_data)
>>> predicted_classes_jsonlines = predict_fn(transformed_inputs, model)
>>> model_outputs = output_fn(predicted_classes_jsonlines)
>>> print(model_outputs[0])

[{inference_nsmc.py:47} INFO - input text: 이 영화는 최고의 영화입니다
[{inference_nsmc.py:47} INFO - input text: 최악이에요. 배우의 연기력도 좋지 않고 내용도 너무 허접합니다
[{inference_nsmc.py:77} INFO - predicted_class: Pos
[{inference_nsmc.py:84} INFO - jsonline: {"predicted_label": "Pos", "score": 0.9619030952453613}
[{inference_nsmc.py:77} INFO - predicted_class: Neg
[{inference_nsmc.py:84} INFO - jsonline: {"predicted_label": "Neg", "score": 0.9994170665740967}
{"predicted_label": "Pos", "score": 0.9619030952453613}
{"predicted_label": "Neg", "score": 0.9994170665740967}
```
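Against a live endpoint, the same jsonlines payload can be sent with boto3. A hedged sketch follows; the endpoint name is a placeholder, and the content type matches what `input_fn` and `output_fn` above expect.

```python
# Hypothetical invocation sketch; replace the endpoint name with your own.
import boto3

runtime = boto3.client("sagemaker-runtime")

with open("samples/nsmc.txt", mode="rb") as file:
    payload = file.read()

response = runtime.invoke_endpoint(
    EndpointName="<your-endpoint-name>",
    ContentType="application/jsonlines",
    Accept="application/jsonlines",
    Body=payload,
)
print(response["Body"].read().decode("utf-8"))
```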

### Sample data (samples/nsmc.txt)
Each line is a standalone JSON object. The two reviews translate roughly to "This movie is the best" and "The worst. The acting is poor and the story is sloppy."
```
{"text": ["이 영화는 최고의 영화입니다"]}
{"text": ["최악이에요. 배우의 연기력도 좋지 않고 내용도 너무 허접합니다"]}
```
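Outside SageMaker, the fine-tuned weights can also be used directly from the Hugging Face Hub. A minimal sketch with the `transformers` pipeline API is below; note that the pipeline reports labels from the model config, which may be the generic `LABEL_0`/`LABEL_1` rather than the `Neg`/`Pos` names the handler script maps them to.

```python
# Hub-only usage sketch; no SageMaker handler involved.
from transformers import pipeline

nsmc = pipeline("sentiment-analysis", model="daekeun-ml/koelectra-small-v3-nsmc")
print(nsmc("이 영화는 최고의 영화입니다"))
# Under the handler's classes list, index 1 corresponds to 'Pos'.
```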

## References
- KoELECTRA: https://github.com/monologg/KoELECTRA
- Naver Sentiment Movie Corpus Dataset: https://github.com/e9t/nsmc