Upload trainDistilBERT.py

trainDistilBERT.py (ADDED, +171 -0)

"""
This module fine-tunes a DistilBERT model on the ACARIS dataset for comparison with ACARISMdl.
"""

import torch
from torch import nn
from torch.utils.data import DataLoader
from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification, TrainingArguments, Trainer, EarlyStoppingCallback
from transformers.modeling_outputs import SequenceClassifierOutput
from datasets import Dataset
import pandas as pd
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, roc_auc_score
import wandb
import random
import numpy as np

config = {
    "mdl": "distilbert-base-uncased",
    "epochs": 5,
    "batchSize": 14,
    "maxLen": 512,
    "warmupSteps": 0.1, # proportion of total steps, NOT absolute
    "weightDecay": 0.02,
    "outputDir": "./output",
    "earlyStopping": True,
    "earlyStoppingPatience": 2,
    "dropout": 0.1,
    "initlr": 5e-5,
    "epsilon": 1e-8
}

wandb.init(project="MarkIII_ACARIS", entity="simtoonia", config=config)

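# Worked example of the warmup conversion performed in ACARISBERT.train() below
# (hypothetical dataset size, for illustration only): with 14,000 training rows,
# totalSteps = 14000 // 14 * 5 = 5000, so warmupSteps = int(5000 * 0.1) = 500.
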
def lockSeed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

# disabled, as determinism is not guaranteed and lowers performance
#lockSeed(69) # setting a fixed seed for *some* reproducibility

class DistilBertForMulticlassSequenceClassification(DistilBertForSequenceClassification):
    def __init__(self, config):
        super().__init__(config)

    def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.distilbert(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)

        # pool on the [CLS]-position token, then run the pre-classifier -> ReLU -> dropout -> classifier head
        hidden_state = outputs[0]
        pooled_output = hidden_state[:, 0]
        pooled_output = self.pre_classifier(pooled_output)
        pooled_output = nn.ReLU()(pooled_output)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # cross-entropy over (batch, num_labels) logits and integer class labels
            lossFct = nn.CrossEntropyLoss()
            loss = lossFct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

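# Minimal shape sanity check for the custom head (an illustrative sketch, not
# executed during training; assumes the same three-class setup used below):
#
#   mdl = DistilBertForMulticlassSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=3)
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   assert mdl(**tok("quick check", return_tensors="pt")).logits.shape == (1, 3)
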
class ACARISBERT:
    def __init__(self, trainPath, valPath):
        self.trainPath = trainPath
        self.valPath = valPath
        self.tokenizer = DistilBertTokenizerFast.from_pretrained(config["mdl"])
        # the id2label/label2id mapping here must stay in sync with sentMapping in tokenize_data
        self.model = DistilBertForMulticlassSequenceClassification.from_pretrained(config["mdl"], num_labels=3, id2label={0: "neg", 1: "neu", 2: "pos"}, label2id={"neg": 0, "neu": 1, "pos": 2}, dropout=config["dropout"], attention_dropout=config["dropout"])

    def read_data(self, path):
        # the dataset is pipe-separated; only the text and its sentiment label are needed
        df = pd.read_csv(path, sep="|", usecols=["content", "sentiment"])
        return Dataset.from_pandas(df)

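    # Expected input layout for read_data (illustrative rows, not real data):
    #
    #   content|sentiment
    #   I love this!|pos
    #   This is awful.|neg
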
    def tokenize_data(self, dataset):
        sentMapping = {"pos": 2, "neg": 0, "neu": 1}
        tokenized = dataset.map(
            lambda x: {
                **self.tokenizer(x["content"], truncation=True, padding="max_length", max_length=config["maxLen"]),
                # plain Python ints suffice here; set_format below converts the column to tensors
                "labels": [sentMapping[sent] for sent in x["sentiment"]]
            },
            batched=True,
            remove_columns=["content", "sentiment"]
        )
        tokenized.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
        return tokenized

    def get_data_loaders(self, trainDS, valDS):
        # unused by Trainer (which builds its own loaders); kept for manual experimentation
        trainLoader = DataLoader(trainDS, batch_size=config["batchSize"], shuffle=False)
        valLoader = DataLoader(valDS, batch_size=config["batchSize"], shuffle=False)
        return trainLoader, valLoader

    def compute_metrics(self, evalPred):
        logits, labels = evalPred
        preds = torch.argmax(torch.Tensor(logits), dim=1)
        # roc_auc_score needs class probabilities rather than raw logits, hence the softmax
        probs = torch.nn.functional.softmax(torch.Tensor(logits), dim=1)
        # average=None yields one precision/recall/f1 value per class
        precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average=None)
        accuracy = accuracy_score(labels, preds)
        rocAUC = roc_auc_score(labels, probs, multi_class="ovr")
        metrics = {
            "accuracy": accuracy,
            "roc_auc": rocAUC
        }
        metricNames = ["precision", "recall", "f1"]
        labelNames = ["neg", "neu", "pos"]
        for metricName, metricValue in zip(metricNames, [precision, recall, f1]):
            for labelName, value in zip(labelNames, metricValue):
                metrics[f"{metricName}_{labelName}"] = float(value)
        return metrics

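    # Illustrative return value of compute_metrics (hypothetical numbers):
    #   {"accuracy": 0.87, "roc_auc": 0.94, "precision_neg": 0.85, "recall_neg": 0.83,
    #    "f1_neg": 0.84, ..., "f1_pos": 0.88}
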
    def train(self):
        trainDS = self.tokenize_data(self.read_data(self.trainPath))
        valDS = self.tokenize_data(self.read_data(self.valPath))

        # convert the warmup proportion from config into an absolute step count
        totalSteps = len(trainDS) // config["batchSize"] * config["epochs"]
        warmupSteps = int(totalSteps * config["warmupSteps"])

        trainingArgs = TrainingArguments(
            output_dir=config["outputDir"],
            num_train_epochs=config["epochs"],
            per_device_train_batch_size=config["batchSize"],
            per_device_eval_batch_size=config["batchSize"],
            warmup_steps=warmupSteps,
            weight_decay=config["weightDecay"],
            logging_dir="./logs",
            logging_steps=100,
            learning_rate=config["initlr"],
            evaluation_strategy="epoch",
            save_strategy="epoch",
            load_best_model_at_end=True, # required for EarlyStoppingCallback
            metric_for_best_model="accuracy",
            save_total_limit=5,
            adam_epsilon=config["epsilon"],
            report_to="wandb",
            fp16=True
        )

        trainer = Trainer(
            model=self.model,
            args=trainingArgs,
            train_dataset=trainDS,
            eval_dataset=valDS,
            compute_metrics=self.compute_metrics,
            callbacks=[EarlyStoppingCallback(early_stopping_patience=config["earlyStoppingPatience"])]
        )
        print(f"Number of parameters: {trainer.model.num_parameters()}")
        print("Running eval ...") # baseline evaluation before any fine-tuning
        trainer.evaluate()
        print("Running training ...")
        trainer.train()
        print("Saving model ...")
        trainer.save_model(config["outputDir"])


if __name__ == "__main__":
    acaris_bert = ACARISBERT("./datasets/train.csv", "./datasets/val.csv")
    acaris_bert.train()
    wandb.finish()
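
# Loading the fine-tuned weights back for inference (an illustrative sketch;
# paths and label names assume the defaults above):
#
#   tok = DistilBertTokenizerFast.from_pretrained(config["mdl"])
#   mdl = DistilBertForMulticlassSequenceClassification.from_pretrained(config["outputDir"])
#   logits = mdl(**tok("I love this!", return_tensors="pt")).logits
#   print(mdl.config.id2label[int(logits.argmax(dim=-1))])  # e.g. "pos" (hypothetical)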