Masioki committed
Commit d81c636
1 Parent(s): 82891d2

End of training

README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: fusion_gttbsc_distilbert-uncased-ft
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # fusion_gttbsc_distilbert-uncased-ft
+
+ This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0007
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.41.2
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.2
+ - Tokenizers 0.19.1
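
As a rough illustration, the hyperparameters listed in the card map onto the Hugging Face `TrainingArguments` API as sketched below. This is not the original training script: the output directory is a placeholder, the model/dataset wiring is omitted, and the Adam betas/epsilon in the card are the transformers defaults.

```python
from transformers import TrainingArguments

# Minimal sketch reproducing the hyperparameters from the model card.
# output_dir is a placeholder; model/dataset wiring is omitted.
training_args = TrainingArguments(
    output_dir="fusion_gttbsc_distilbert-uncased-ft",
    learning_rate=7e-4,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    gradient_accumulation_steps=4,   # effective train batch size: 2 * 4 = 8
    num_train_epochs=20,
    lr_scheduler_type="linear",
    fp16=True,                       # "Native AMP" mixed precision
    # adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8 are the defaults,
    # matching the optimizer line in the card.
)
```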
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "architectures": [
+     "FusionCrossAttentionSentenceClassifier"
+   ],
+   "dropout": 0.3,
+   "embedding_strategy": "self-att",
+   "fp16": true,
+   "fusion_layers": 2,
+   "fusion_strategy": "dense",
+   "heads": 8,
+   "hidden_size": 768,
+   "k1_backbone": "whisper-encoder-small",
+   "k1_freezed": true,
+   "k1_kwargs": {
+     "load_in_4bit": false
+   },
+   "k2_backbone": "transformer-prosody-encoder192",
+   "k2_freezed": false,
+   "k2_kwargs": {
+     "dropout": 0.3,
+     "heads": 8,
+     "input_size": 5,
+     "num_layers": 2
+   },
+   "labels": 18,
+   "model_type": "fusion-cross-attention-sentence-classifier",
+   "multilabel": true,
+   "q_backbone": "distilbert-base-uncased-lora",
+   "q_freezed": false,
+   "q_kwargs": {},
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.2"
+ }
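
The config describes a custom multimodal architecture (`FusionCrossAttentionSentenceClassifier`) rather than a stock transformers class, so `AutoModel` will not load it without the accompanying model code. The config itself can still be inspected directly. The sketch below assumes the repository id is `Masioki/fusion_gttbsc_distilbert-uncased-ft`, and reading q_/k1_/k2_ as the query (text) and key (audio, prosody) branches of the cross-attention fusion is an interpretation of the key names, not something stated in the file.

```python
import json
from huggingface_hub import hf_hub_download

# Assumed repo id; adjust if the model lives under a different namespace.
REPO_ID = "Masioki/fusion_gttbsc_distilbert-uncased-ft"

# Download only config.json and inspect the fusion settings.
config_path = hf_hub_download(repo_id=REPO_ID, filename="config.json")
with open(config_path) as f:
    config = json.load(f)

print(config["architectures"])     # ['FusionCrossAttentionSentenceClassifier']
print(config["q_backbone"])        # distilbert-base-uncased-lora (not frozen)
print(config["k1_backbone"])       # whisper-encoder-small (frozen)
print(config["k2_backbone"])       # transformer-prosody-encoder192 (not frozen)
print(config["labels"], config["multilabel"])  # 18 labels, multi-label setup
```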
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0046141292b9c1fe21afecd9322b0af37d8046d43fd127e8dbdba2b4c183307
+ size 388182868
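
The entry above is a Git LFS pointer: the actual weights live in a separate blob identified by the SHA-256 and size shown. A locally downloaded copy can be checked against those values, for example:

```python
import hashlib

# Verify a downloaded model.safetensors against the LFS pointer values above.
# The local path is a placeholder.
EXPECTED_SHA256 = "f0046141292b9c1fe21afecd9322b0af37d8046d43fd127e8dbdba2b4c183307"
EXPECTED_SIZE = 388182868  # bytes (~388 MB)

sha = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"unexpected size: {size}"
assert sha.hexdigest() == EXPECTED_SHA256, "checksum mismatch"
```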
runs/Jun09_20-54-49_4c70ee357d9f/events.out.tfevents.1717966490.4c70ee357d9f.4846.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f12a36dc11aa9cb030fc82cdfc3f3d03be9472d7f83afa75b35506b6bfd054e
+ size 4704
runs/Jun09_20-55-35_4c70ee357d9f/events.out.tfevents.1717966537.4c70ee357d9f.4846.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:064c047027223b6e14494dd377becaad6b9be6d231750cb9d40627dc0f89c632
+ size 43909
runs/Jun09_20-55-35_4c70ee357d9f/events.out.tfevents.1717975023.4c70ee357d9f.4846.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23e6456bf72ce4e73665e590c59dffb36e978ccdfff717f4998535d0ad69fd88
+ size 1548
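
The three `runs/...` files are TensorBoard event logs written during training. Assuming the repository is cloned locally and the `tensorboard` package is installed, they can be read programmatically; the scalar tag used below is the usual HF Trainer naming, not something guaranteed by this commit.

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Load one of the committed TensorBoard logs (path relative to the repo root).
ea = EventAccumulator("runs/Jun09_20-55-35_4c70ee357d9f")
ea.Reload()

print(ea.Tags()["scalars"])          # list of logged scalar tags
# 'train/loss' is the typical Trainer tag; replace with one printed above.
for event in ea.Scalars("train/loss"):
    print(event.step, event.value)
```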
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
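
The tokenizer is a standard `DistilBertTokenizer` (lower-cased WordPiece, 512-token limit), so it loads independently of the custom classifier. A small usage sketch, again assuming the repo id `Masioki/fusion_gttbsc_distilbert-uncased-ft`:

```python
from transformers import AutoTokenizer

# Assumed repo id; tokenizer_class in tokenizer_config.json is DistilBertTokenizer.
tokenizer = AutoTokenizer.from_pretrained("Masioki/fusion_gttbsc_distilbert-uncased-ft")

enc = tokenizer("An example utterance to classify.", truncation=True, max_length=512)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', 'an', 'example', ...] -- lower-cased, wrapped in [CLS]/[SEP]
```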
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a085bc7a64153754f0cac53b494bc846faa3b01daa2683edf352a6aa48b5ae91
+ size 5240
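
`training_args.bin` is the pickled `TrainingArguments` object the Trainer saves alongside the model, so the exact training configuration can be recovered from it. Sketch below; the repo id is an assumption, and `weights_only=False` is needed on recent PyTorch versions because the file holds a pickled Python object rather than tensors.

```python
import torch
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="Masioki/fusion_gttbsc_distilbert-uncased-ft",  # assumed repo id
    filename="training_args.bin",
)
# Note: unpickling executes code from the file; only do this for a trusted repo.
args = torch.load(path, weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)
```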
vocab.txt ADDED
The diff for this file is too large to render. See raw diff