scottsuk0306 committed
Commit 608312e
1 Parent(s): fda18b6

Model save

Files changed (4)
  1. README.md +9 -9
  2. all_results.json +4 -9
  3. train_results.json +4 -4
  4. trainer_state.json +51 -43
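This commit (608312e, parent fda18b6) only rewrites the training artifacts listed above. As an aside, commit history for a Hub repo can be inspected programmatically; the sketch below is a minimal example using `huggingface_hub`, with a placeholder repo id, since the model repo's name does not appear in this diff.

```python
# Minimal sketch: list a model repo's commits with huggingface_hub.
# The repo id is a placeholder -- substitute the repo this commit belongs to.
from huggingface_hub import HfApi

repo_id = "scottsuk0306/your-model-name"  # hypothetical repo id

api = HfApi()
for commit in api.list_repo_commits(repo_id):
    # Prints the short hash and title, i.e. the metadata shown in the header above.
    print(commit.commit_id[:7], commit.title)
```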
README.md CHANGED
@@ -5,7 +5,6 @@ base_model: alignment-handbook/zephyr-7b-sft-full
 tags:
 - trl
 - sft
-- alignment-handbook
 - generated_from_trainer
 datasets:
 - generator
@@ -21,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.4948
+- Loss: 0.4842
 
 ## Model description
 
@@ -59,13 +58,14 @@ The following hyperparameters were used during training:
 |:-------------:|:-----:|:----:|:---------------:|
 | 1.1139        | 1.0   | 1    | 1.1133          |
 | 1.1139        | 2.0   | 2    | 1.2947          |
-| 1.1139        | 3.0   | 3    | 1.0839          |
-| 1.1139        | 4.0   | 4    | 0.8506          |
-| 1.0867        | 5.0   | 5    | 0.7521          |
-| 1.0867        | 6.0   | 6    | 0.6283          |
-| 1.0867        | 7.0   | 7    | 0.5677          |
-| 1.0867        | 8.0   | 8    | 0.5233          |
-| 1.0867        | 9.0   | 9    | 0.4948          |
+| 1.1139        | 3.0   | 3    | 1.0840          |
+| 1.1139        | 4.0   | 4    | 0.8533          |
+| 1.0874        | 5.0   | 5    | 0.7534          |
+| 1.0874        | 6.0   | 6    | 0.6295          |
+| 1.0874        | 7.0   | 7    | 0.5679          |
+| 1.0874        | 8.0   | 8    | 0.5225          |
+| 1.0874        | 9.0   | 9    | 0.4936          |
+| 0.5928        | 10.0  | 10   | 0.4842          |
 
 
 ### Framework versions
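The card above records only the updated evaluation loss and per-epoch results. For orientation, here is a minimal loading sketch, assuming a standard `transformers` causal-LM checkpoint; the repo id shown is the base model named in the card, since the fine-tuned model's own repo id does not appear in this diff.

```python
# Minimal sketch: load and query a zephyr-7b-sft-full-style checkpoint.
# Replace repo_id with the fine-tuned model's repo id (not given in this diff);
# the base model from the card is used here as a stand-in.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "alignment-handbook/zephyr-7b-sft-full"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype="auto", device_map="auto")

prompt = "Briefly explain supervised fine-tuning (SFT)."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```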
all_results.json CHANGED
@@ -1,14 +1,9 @@
 {
     "epoch": 10.0,
-    "eval_loss": 1.0543607473373413,
-    "eval_runtime": 4.7419,
-    "eval_samples": 424,
-    "eval_samples_per_second": 6.537,
-    "eval_steps_per_second": 0.211,
     "total_flos": 8375186227200.0,
-    "train_loss": 0.0,
-    "train_runtime": 1.4498,
+    "train_loss": 0.8427552580833435,
+    "train_runtime": 304.7513,
     "train_samples": 851,
-    "train_samples_per_second": 331.078,
-    "train_steps_per_second": 6.897
+    "train_samples_per_second": 1.575,
+    "train_steps_per_second": 0.033
 }
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 10.0,
     "total_flos": 8375186227200.0,
-    "train_loss": 0.0,
-    "train_runtime": 1.4498,
+    "train_loss": 0.8427552580833435,
+    "train_runtime": 304.7513,
     "train_samples": 851,
-    "train_samples_per_second": 331.078,
-    "train_steps_per_second": 6.897
+    "train_samples_per_second": 1.575,
+    "train_steps_per_second": 0.033
 }
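The new train_results.json values are internally consistent: with 10 optimizer steps over a 304.75 s run, steps-per-second comes out at the reported 0.033, and samples-per-second implies roughly 480 sequences processed in total over 10 epochs (the gap versus `train_samples: 851` is presumably because the generator dataset packs examples into fixed-length sequences). A quick check, assuming the file sits in the working directory:

```python
# Minimal sketch: sanity-check the throughput fields in train_results.json.
import json

with open("train_results.json") as f:  # path assumed; adjust as needed
    stats = json.load(f)

total_steps = 10  # final "step" value recorded in trainer_state.json

print(total_steps / stats["train_runtime"])                         # ~0.0328 -> rounds to 0.033
print(stats["train_samples_per_second"] * stats["train_runtime"])   # ~480 sequences over 10 epochs
```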
trainer_state.json CHANGED
@@ -10,7 +10,7 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "grad_norm": 18.364192038860114,
+      "grad_norm": 18.363699029410846,
       "learning_rate": 1e-05,
       "loss": 1.1139,
       "step": 1
@@ -18,97 +18,105 @@
     {
       "epoch": 1.0,
       "eval_loss": 1.1133455038070679,
-      "eval_runtime": 2.8092,
-      "eval_samples_per_second": 17.087,
-      "eval_steps_per_second": 0.356,
+      "eval_runtime": 2.7908,
+      "eval_samples_per_second": 17.199,
+      "eval_steps_per_second": 0.358,
       "step": 1
     },
     {
       "epoch": 2.0,
-      "eval_loss": 1.2946728467941284,
-      "eval_runtime": 2.6987,
-      "eval_samples_per_second": 17.786,
-      "eval_steps_per_second": 0.371,
+      "eval_loss": 1.2946919202804565,
+      "eval_runtime": 2.6849,
+      "eval_samples_per_second": 17.878,
+      "eval_steps_per_second": 0.372,
       "step": 2
     },
     {
       "epoch": 3.0,
-      "eval_loss": 1.0838984251022339,
-      "eval_runtime": 2.6695,
-      "eval_samples_per_second": 17.981,
-      "eval_steps_per_second": 0.375,
+      "eval_loss": 1.084022879600525,
+      "eval_runtime": 2.6785,
+      "eval_samples_per_second": 17.921,
+      "eval_steps_per_second": 0.373,
       "step": 3
     },
     {
       "epoch": 4.0,
-      "eval_loss": 0.8506386280059814,
-      "eval_runtime": 2.6924,
-      "eval_samples_per_second": 17.828,
+      "eval_loss": 0.8533250689506531,
+      "eval_runtime": 2.6989,
+      "eval_samples_per_second": 17.785,
       "eval_steps_per_second": 0.371,
       "step": 4
     },
     {
       "epoch": 5.0,
-      "grad_norm": 19.328671929154766,
+      "grad_norm": 20.40164947404992,
       "learning_rate": 5.8682408883346535e-06,
-      "loss": 1.0867,
+      "loss": 1.0874,
       "step": 5
     },
     {
       "epoch": 5.0,
-      "eval_loss": 0.7521028518676758,
-      "eval_runtime": 2.6708,
-      "eval_samples_per_second": 17.972,
-      "eval_steps_per_second": 0.374,
+      "eval_loss": 0.7533825039863586,
+      "eval_runtime": 2.6807,
+      "eval_samples_per_second": 17.906,
+      "eval_steps_per_second": 0.373,
       "step": 5
     },
     {
       "epoch": 6.0,
-      "eval_loss": 0.628281831741333,
-      "eval_runtime": 2.7091,
-      "eval_samples_per_second": 17.718,
-      "eval_steps_per_second": 0.369,
+      "eval_loss": 0.6295236945152283,
+      "eval_runtime": 2.68,
+      "eval_samples_per_second": 17.911,
+      "eval_steps_per_second": 0.373,
       "step": 6
     },
     {
       "epoch": 7.0,
-      "eval_loss": 0.5677043795585632,
-      "eval_runtime": 2.6968,
-      "eval_samples_per_second": 17.799,
-      "eval_steps_per_second": 0.371,
+      "eval_loss": 0.567865788936615,
+      "eval_runtime": 2.6867,
+      "eval_samples_per_second": 17.866,
+      "eval_steps_per_second": 0.372,
       "step": 7
     },
     {
       "epoch": 8.0,
-      "eval_loss": 0.5233017802238464,
-      "eval_runtime": 2.6981,
-      "eval_samples_per_second": 17.79,
-      "eval_steps_per_second": 0.371,
+      "eval_loss": 0.5224726796150208,
+      "eval_runtime": 2.6682,
+      "eval_samples_per_second": 17.989,
+      "eval_steps_per_second": 0.375,
       "step": 8
     },
     {
       "epoch": 9.0,
-      "eval_loss": 0.4948171079158783,
-      "eval_runtime": 2.6682,
-      "eval_samples_per_second": 17.99,
-      "eval_steps_per_second": 0.375,
+      "eval_loss": 0.4935952425003052,
+      "eval_runtime": 2.672,
+      "eval_samples_per_second": 17.964,
+      "eval_steps_per_second": 0.374,
       "step": 9
     },
     {
       "epoch": 10.0,
-      "grad_norm": 4.900067571807948,
+      "grad_norm": 4.71394718703185,
       "learning_rate": 0.0,
-      "loss": 0.5927,
+      "loss": 0.5928,
+      "step": 10
+    },
+    {
+      "epoch": 10.0,
+      "eval_loss": 0.48424649238586426,
+      "eval_runtime": 2.6942,
+      "eval_samples_per_second": 17.816,
+      "eval_steps_per_second": 0.371,
       "step": 10
     },
     {
       "epoch": 10.0,
       "step": 10,
       "total_flos": 8375186227200.0,
-      "train_loss": 0.0,
-      "train_runtime": 1.4498,
-      "train_samples_per_second": 331.078,
-      "train_steps_per_second": 6.897
+      "train_loss": 0.8427552580833435,
+      "train_runtime": 304.7513,
+      "train_samples_per_second": 1.575,
+      "train_steps_per_second": 0.033
     }
   ],
   "logging_steps": 5,