avuhong committed
Commit
6f62989
1 Parent(s): 817fe09

Upload 4 files

Files changed (4)
  1. ds_seq_train.csv +0 -0
  2. ds_seq_val.csv +0 -0
  3. run_mlm.py +659 -0
  4. train_AAVesm2_650M_800_v1.sh +15 -0
ds_seq_train.csv ADDED
The diff for this file is too large to render. See raw diff
 
ds_seq_val.csv ADDED
The diff for this file is too large to render. See raw diff
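The two CSV diffs above are not rendered, but run_mlm.py below treats them as ordinary CSV inputs for the datasets library: it uses the column named "text", or falls back to the first column, as the sequence text. A minimal sketch of that loading step (file names as in this commit; the column layout itself is an assumption, since the diffs are hidden):

from datasets import load_dataset

# Load the committed CSVs the same way run_mlm.py does when --train_file/--validation_file
# point at .csv files; the "text"-or-first-column rule below mirrors the script's behavior.
raw = load_dataset(
    "csv",
    data_files={"train": "ds_seq_train.csv", "validation": "ds_seq_val.csv"},
)
column_names = list(raw["train"].features)
text_column = "text" if "text" in column_names else column_names[0]
print(column_names, "->", text_column)
print(raw["train"][0][text_column][:60])  # first 60 characters of the first training sequence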
 
run_mlm.py ADDED
@@ -0,0 +1,659 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Team All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) on a text file or a dataset.
+
+ Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
+ https://huggingface.co/models?filter=fill-mask
+ """
+ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
+
+ import logging
+ import math
+ import os
+ import sys
+ from dataclasses import dataclass, field
+ from itertools import chain
+ from typing import Optional
+
+ import datasets
+ import evaluate
+ from datasets import load_dataset
+
+ import transformers
+ from transformers import (
+     CONFIG_MAPPING,
+     MODEL_FOR_MASKED_LM_MAPPING,
+     AutoConfig,
+     AutoModelForMaskedLM,
+     AutoTokenizer,
+     DataCollatorForLanguageModeling,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     is_torch_tpu_available,
+     set_seed,
+ )
+ from transformers.trainer_utils import get_last_checkpoint
+ from transformers.utils import check_min_version, send_example_telemetry
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
+ # check_min_version("4.29.0.dev0")
+
+ require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
+
+ logger = logging.getLogger(__name__)
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
+     """
+
+     model_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
+             )
+         },
+     )
+     model_type: Optional[str] = field(
+         default=None,
+         metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
+     )
+     config_overrides: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "Override some existing default config settings when a model is trained from scratch. Example: "
+                 "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
+             )
+         },
+     )
+     config_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+     )
+     tokenizer_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     use_fast_tokenizer: bool = field(
+         default=True,
+         metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
+     )
+     model_revision: str = field(
+         default="main",
+         metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
+                 "with private models)."
+             )
+         },
+     )
+     low_cpu_mem_usage: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
+                 "set True will benefit LLM loading time and RAM consumption."
+             )
+         },
+     )
+
+     def __post_init__(self):
+         if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
+             raise ValueError(
+                 "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
+             )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+     """
+
+     dataset_name: Optional[str] = field(
+         default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: Optional[str] = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
+     validation_file: Optional[str] = field(
+         default=None,
+         metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+     validation_split_percentage: Optional[int] = field(
+         default=5,
+         metadata={
+             "help": "The percentage of the train set used as validation set in case there's no validation split"
+         },
+     )
+     max_seq_length: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "The maximum total input sequence length after tokenization. Sequences longer "
+                 "than this will be truncated."
+             )
+         },
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     mlm_probability: float = field(
+         default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
+     )
+     line_by_line: bool = field(
+         default=False,
+         metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
+     )
+     pad_to_max_length: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "Whether to pad all samples to `max_seq_length`. "
+                 "If False, will pad the samples dynamically when batching to the maximum length in the batch."
+             )
+         },
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "For debugging purposes or quicker training, truncate the number of training examples to this "
+                 "value if set."
+             )
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+                 "value if set."
+             )
+         },
+     )
+     streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
+
+     def __post_init__(self):
+         if self.streaming:
+             require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
+
+         if self.dataset_name is None and self.train_file is None and self.validation_file is None:
+             raise ValueError("Need either a dataset name or a training/validation file.")
+         else:
+             if self.train_file is not None:
+                 extension = self.train_file.split(".")[-1]
+                 if extension not in ["csv", "json", "txt"]:
+                     raise ValueError("`train_file` should be a csv, a json or a txt file.")
+             if self.validation_file is not None:
+                 extension = self.validation_file.split(".")[-1]
+                 if extension not in ["csv", "json", "txt"]:
+                     raise ValueError("`validation_file` should be a csv, a json or a txt file.")
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
+     # information sent is the one passed as arguments along with your Python/PyTorch versions.
+     send_example_telemetry("run_mlm", model_args, data_args)
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+
+     if training_args.should_log:
+         # The default of training_args.log_level is passive, so we set log level at info here to have that default.
+         transformers.utils.logging.set_verbosity_info()
+
+     log_level = training_args.get_process_log_level()
+     logger.setLevel(log_level)
+     datasets.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.enable_default_handler()
+     transformers.utils.logging.enable_explicit_format()
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+         + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
+     # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
+     # (the dataset will be downloaded automatically from the datasets Hub
+     #
+     # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
+     # behavior (see below)
+     #
+     # In distributed training, the load_dataset function guarantee that only one local process can concurrently
+     # download the dataset.
+     if data_args.dataset_name is not None:
+         # Downloading and loading a dataset from the hub.
+         raw_datasets = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             cache_dir=model_args.cache_dir,
+             use_auth_token=True if model_args.use_auth_token else None,
+             streaming=data_args.streaming,
+         )
+         if "validation" not in raw_datasets.keys():
+             raw_datasets["validation"] = load_dataset(
+                 data_args.dataset_name,
+                 data_args.dataset_config_name,
+                 split=f"train[:{data_args.validation_split_percentage}%]",
+                 cache_dir=model_args.cache_dir,
+                 use_auth_token=True if model_args.use_auth_token else None,
+                 streaming=data_args.streaming,
+             )
+             raw_datasets["train"] = load_dataset(
+                 data_args.dataset_name,
+                 data_args.dataset_config_name,
+                 split=f"train[{data_args.validation_split_percentage}%:]",
+                 cache_dir=model_args.cache_dir,
+                 use_auth_token=True if model_args.use_auth_token else None,
+                 streaming=data_args.streaming,
+             )
+     else:
+         data_files = {}
+         if data_args.train_file is not None:
+             data_files["train"] = data_args.train_file
+             extension = data_args.train_file.split(".")[-1]
+         if data_args.validation_file is not None:
+             data_files["validation"] = data_args.validation_file
+             extension = data_args.validation_file.split(".")[-1]
+         if extension == "txt":
+             extension = "text"
+         raw_datasets = load_dataset(
+             extension,
+             data_files=data_files,
+             cache_dir=model_args.cache_dir,
+             use_auth_token=True if model_args.use_auth_token else None,
+         )
+
+         # If no validation data is there, validation_split_percentage will be used to divide the dataset.
+         if "validation" not in raw_datasets.keys():
+             raw_datasets["validation"] = load_dataset(
+                 extension,
+                 data_files=data_files,
+                 split=f"train[:{data_args.validation_split_percentage}%]",
+                 cache_dir=model_args.cache_dir,
+                 use_auth_token=True if model_args.use_auth_token else None,
+             )
+             raw_datasets["train"] = load_dataset(
+                 extension,
+                 data_files=data_files,
+                 split=f"train[{data_args.validation_split_percentage}%:]",
+                 cache_dir=model_args.cache_dir,
+                 use_auth_token=True if model_args.use_auth_token else None,
+             )
+
+     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
+     # https://huggingface.co/docs/datasets/loading_datasets.html.
+
+     # Load pretrained model and tokenizer
+     #
+     # Distributed training:
+     # The .from_pretrained methods guarantee that only one local process can concurrently
+     # download model & vocab.
+     config_kwargs = {
+         "cache_dir": model_args.cache_dir,
+         "revision": model_args.model_revision,
+         "use_auth_token": True if model_args.use_auth_token else None,
+     }
+     if model_args.config_name:
+         config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
+     elif model_args.model_name_or_path:
+         config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
+     else:
+         config = CONFIG_MAPPING[model_args.model_type]()
+         logger.warning("You are instantiating a new config instance from scratch.")
+         if model_args.config_overrides is not None:
+             logger.info(f"Overriding config: {model_args.config_overrides}")
+             config.update_from_string(model_args.config_overrides)
+             logger.info(f"New config: {config}")
+
+     tokenizer_kwargs = {
+         "cache_dir": model_args.cache_dir,
+         "use_fast": model_args.use_fast_tokenizer,
+         "revision": model_args.model_revision,
+         "use_auth_token": True if model_args.use_auth_token else None,
+     }
+     if model_args.tokenizer_name:
+         tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
+     elif model_args.model_name_or_path:
+         tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
+     else:
+         raise ValueError(
+             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
+         )
+
+     if model_args.model_name_or_path:
+         model = AutoModelForMaskedLM.from_pretrained(
+             model_args.model_name_or_path,
+             from_tf=bool(".ckpt" in model_args.model_name_or_path),
+             config=config,
+             cache_dir=model_args.cache_dir,
+             revision=model_args.model_revision,
+             use_auth_token=True if model_args.use_auth_token else None,
+             low_cpu_mem_usage=model_args.low_cpu_mem_usage,
+         )
+     else:
+         logger.info("Training new model from scratch")
+         model = AutoModelForMaskedLM.from_config(config)
+
+     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
+     # on a small vocab and want a smaller embedding size, remove this test.
+     embedding_size = model.get_input_embeddings().weight.shape[0]
+     if len(tokenizer) > embedding_size:
+         model.resize_token_embeddings(len(tokenizer))
+
+     # Preprocessing the datasets.
+     # First we tokenize all the texts.
+     if training_args.do_train:
+         column_names = list(raw_datasets["train"].features)
+     else:
+         column_names = list(raw_datasets["validation"].features)
+     text_column_name = "text" if "text" in column_names else column_names[0]
+
+     if data_args.max_seq_length is None:
+         max_seq_length = tokenizer.model_max_length
+         if max_seq_length > 1024:
+             logger.warning(
+                 "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
+                 " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
+                 " override this default with `--block_size xxx`."
+             )
+             max_seq_length = 1024
+     else:
+         if data_args.max_seq_length > tokenizer.model_max_length:
+             logger.warning(
+                 f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+                 f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
+             )
+         max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
+
+     if data_args.line_by_line:
+         # When using line_by_line, we just tokenize each nonempty line.
+         padding = "max_length" if data_args.pad_to_max_length else False
+
+         def tokenize_function(examples):
+             # Remove empty lines
+             examples[text_column_name] = [
+                 line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
+             ]
+             return tokenizer(
+                 examples[text_column_name],
+                 padding=padding,
+                 truncation=True,
+                 max_length=max_seq_length,
+                 # We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
+                 # receives the `special_tokens_mask`.
+                 return_special_tokens_mask=True,
+             )
+
+         with training_args.main_process_first(desc="dataset map tokenization"):
+             if not data_args.streaming:
+                 tokenized_datasets = raw_datasets.map(
+                     tokenize_function,
+                     batched=True,
+                     num_proc=data_args.preprocessing_num_workers,
+                     remove_columns=[text_column_name],
+                     load_from_cache_file=not data_args.overwrite_cache,
+                     desc="Running tokenizer on dataset line_by_line",
+                 )
+             else:
+                 tokenized_datasets = raw_datasets.map(
+                     tokenize_function,
+                     batched=True,
+                     remove_columns=[text_column_name],
+                 )
+     else:
+         # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
+         # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
+         # efficient when it receives the `special_tokens_mask`.
+         def tokenize_function(examples):
+             return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
+
+         with training_args.main_process_first(desc="dataset map tokenization"):
+             if not data_args.streaming:
+                 tokenized_datasets = raw_datasets.map(
+                     tokenize_function,
+                     batched=True,
+                     num_proc=data_args.preprocessing_num_workers,
+                     remove_columns=column_names,
+                     load_from_cache_file=not data_args.overwrite_cache,
+                     desc="Running tokenizer on every text in dataset",
+                 )
+             else:
+                 tokenized_datasets = raw_datasets.map(
+                     tokenize_function,
+                     batched=True,
+                     remove_columns=column_names,
+                 )
+
+         # Main data processing function that will concatenate all texts from our dataset and generate chunks of
+         # max_seq_length.
+         def group_texts(examples):
+             # Concatenate all texts.
+             concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
+             total_length = len(concatenated_examples[list(examples.keys())[0]])
+             # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
+             # customize this part to your needs.
+             if total_length >= max_seq_length:
+                 total_length = (total_length // max_seq_length) * max_seq_length
+             # Split by chunks of max_len.
+             result = {
+                 k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
+                 for k, t in concatenated_examples.items()
+             }
+             return result
+
+         # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
+         # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
+         # might be slower to preprocess.
+         #
+         # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
+         # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
+
+         with training_args.main_process_first(desc="grouping texts together"):
+             if not data_args.streaming:
+                 tokenized_datasets = tokenized_datasets.map(
+                     group_texts,
+                     batched=True,
+                     num_proc=data_args.preprocessing_num_workers,
+                     load_from_cache_file=not data_args.overwrite_cache,
+                     desc=f"Grouping texts in chunks of {max_seq_length}",
+                 )
+             else:
+                 tokenized_datasets = tokenized_datasets.map(
+                     group_texts,
+                     batched=True,
+                 )
+
+     if training_args.do_train:
+         if "train" not in tokenized_datasets:
+             raise ValueError("--do_train requires a train dataset")
+         train_dataset = tokenized_datasets["train"]
+         if data_args.max_train_samples is not None:
+             max_train_samples = min(len(train_dataset), data_args.max_train_samples)
+             train_dataset = train_dataset.select(range(max_train_samples))
+
+     if training_args.do_eval:
+         if "validation" not in tokenized_datasets:
+             raise ValueError("--do_eval requires a validation dataset")
+         eval_dataset = tokenized_datasets["validation"]
+         if data_args.max_eval_samples is not None:
+             max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
+             eval_dataset = eval_dataset.select(range(max_eval_samples))
+
+         def preprocess_logits_for_metrics(logits, labels):
+             if isinstance(logits, tuple):
+                 # Depending on the model and config, logits may contain extra tensors,
+                 # like past_key_values, but logits always come first
+                 logits = logits[0]
+             return logits.argmax(dim=-1)
+
+         metric = evaluate.load("accuracy")
+
+         def compute_metrics(eval_preds):
+             preds, labels = eval_preds
+             # preds have the same shape as the labels, after the argmax(-1) has been calculated
+             # by preprocess_logits_for_metrics
+             labels = labels.reshape(-1)
+             preds = preds.reshape(-1)
+             mask = labels != -100
+             labels = labels[mask]
+             preds = preds[mask]
+             return metric.compute(predictions=preds, references=labels)
+
+     # Data collator
+     # This one will take care of randomly masking the tokens.
+     pad_to_multiple_of_8 = data_args.line_by_line and training_args.fp16 and not data_args.pad_to_max_length
+     data_collator = DataCollatorForLanguageModeling(
+         tokenizer=tokenizer,
+         mlm_probability=data_args.mlm_probability,
+         pad_to_multiple_of=8 if pad_to_multiple_of_8 else None,
+     )
+
+     # Initialize our Trainer
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         train_dataset=train_dataset if training_args.do_train else None,
+         eval_dataset=eval_dataset if training_args.do_eval else None,
+         tokenizer=tokenizer,
+         data_collator=data_collator,
+         compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
+         preprocess_logits_for_metrics=preprocess_logits_for_metrics
+         if training_args.do_eval and not is_torch_tpu_available()
+         else None,
+     )
+
+     # Training
+     if training_args.do_train:
+         checkpoint = None
+         if training_args.resume_from_checkpoint is not None:
+             checkpoint = training_args.resume_from_checkpoint
+         elif last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()  # Saves the tokenizer too for easy upload
+         metrics = train_result.metrics
+
+         max_train_samples = (
+             data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
+         )
+         metrics["train_samples"] = min(max_train_samples, len(train_dataset))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+
+         metrics = trainer.evaluate()
+
+         max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
+         metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
+         try:
+             perplexity = math.exp(metrics["eval_loss"])
+         except OverflowError:
+             perplexity = float("inf")
+         metrics["perplexity"] = perplexity
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
+     if data_args.dataset_name is not None:
+         kwargs["dataset_tags"] = data_args.dataset_name
+         if data_args.dataset_config_name is not None:
+             kwargs["dataset_args"] = data_args.dataset_config_name
+             kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+         else:
+             kwargs["dataset"] = data_args.dataset_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+
+ def _mp_fn(index):
+     # For xla_spawn (TPUs)
+     main()
+
+
+ if __name__ == "__main__":
+     main()
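For reference, a small sketch (separate from the committed file above) of the masking step that DataCollatorForLanguageModeling performs inside run_mlm.py: with the ESM-2 tokenizer each amino acid becomes one token, and roughly mlm_probability (0.15 by default, as used here) of the tokens in a sequence are selected for the masked-language-modeling loss. The peptide string is a toy example, not data from this repository.

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

# Same tokenizer and masking ratio as the training run configured below.
tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

toy_sequence = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"  # made-up toy peptide, illustration only
encoded = tokenizer(toy_sequence, return_special_tokens_mask=True)
batch = collator([encoded])  # applies random masking; labels are -100 at positions not scored

n_selected = int((batch["labels"][0] != -100).sum())
print(f"{n_selected} of {len(encoded['input_ids'])} tokens selected for the MLM loss")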
train_AAVesm2_650M_800_v1.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+
+ CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node 2 /home/avuhong/AAVesm/run_mlm.py \
+ --model_name_or_path facebook/esm2_t33_650M_UR50D \
+ --tokenizer_name facebook/esm2_t33_650M_UR50D \
+ --train_file /home/avuhong/AAVesm/ds_seq_train.csv \
+ --validation_file /home/avuhong/AAVesm/ds_seq_val.csv \
+ --do_train --do_eval --learning_rate 1e-05 \
+ --per_device_train_batch_size 1 --gradient_accumulation_steps 4 --num_train_epochs 36 \
+ --per_device_eval_batch_size 1 --evaluation_strategy epoch \
+ --save_strategy no \
+ --overwrite_output_dir --output_dir output_AAVESM2_650M_v1 \
+ --fp16 --sharded_ddp simple \
+ --max_seq_length 800 \
+ --save_strategy epoch --save_total_limit 2
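As a follow-up, a hedged sketch (not part of this commit) of how the fine-tuned checkpoint written to --output_dir above could be reloaded for masked-residue scoring. The directory name comes from the command; the sequence fragment, masked position, and top-k value are illustrative assumptions.

import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

checkpoint_dir = "output_AAVESM2_650M_v1"  # --output_dir from the training command above
tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
model = AutoModelForMaskedLM.from_pretrained(checkpoint_dir)
model.eval()

sequence = "MAADGYLPDWLEDTLS"  # short capsid-like fragment, illustration only
inputs = tokenizer(sequence, return_tensors="pt")
position = 8  # arbitrary residue position (index 0 is the BOS/CLS token)
inputs["input_ids"][0, position] = tokenizer.mask_token_id

with torch.no_grad():
    logits = model(**inputs).logits
top_ids = torch.topk(logits[0, position], k=3).indices.tolist()
print("top residues at the masked position:", tokenizer.convert_ids_to_tokens(top_ids))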