TLFZ committed on
Commit
0aae28b
1 Parent(s): 47dd4a1

Upload 20240726-154113.toml

Files changed (1)
  1. 20240726-154113.toml +52 -0
20240726-154113.toml ADDED
@@ -0,0 +1,52 @@
+ pretrained_model_name_or_path = "D:/stablediffusion/sdwebuiakiv4.2/models/Stable-diffusion/sdxl 2d/animagineXLV31_v31.safetensors"
+ vae = "D:/stablediffusion/lora-scripts/VAE/sdxl-vae-fp16-fix.safetensors"
+ train_data_dir = "D:/stablediffusion/lora-scripts/train/try/classified/all"
+ prior_loss_weight = 1
+ resolution = "1024,1024"
+ enable_bucket = true
+ min_bucket_reso = 256
+ max_bucket_reso = 4096
+ bucket_reso_steps = 32
+ output_name = "summer memories v8.4 (多服装正则原图翻转)"
+ output_dir = "./output"
+ save_model_as = "safetensors"
+ save_precision = "bf16"
+ save_every_n_epochs = 1
+ save_state = true
+ max_train_epochs = 6
+ train_batch_size = 4
+ gradient_checkpointing = true
+ gradient_accumulation_steps = 16
+ network_train_unet_only = false
+ network_train_text_encoder_only = false
+ learning_rate = 4e-5
+ unet_lr = 4e-5
+ text_encoder_lr = 2e-6
+ lr_scheduler = "cosine_with_restarts"
+ lr_warmup_steps = 0
+ lr_scheduler_num_cycles = 3
+ optimizer_type = "Lion8bit"
+ min_snr_gamma = 5
+ network_module = "lycoris.kohya"
+ network_dim = 100000
+ network_alpha = 1
+ scale_weight_norms = 1
+ log_with = "tensorboard"
+ logging_dir = "./logs"
+ caption_extension = ".txt"
+ shuffle_caption = true
+ keep_tokens = 1
+ max_token_length = 255
+ caption_tag_dropout_rate = 0.2
+ multires_noise_iterations = 6
+ multires_noise_discount = 0.3
+ seed = 1337
+ mixed_precision = "bf16"
+ full_bf16 = true
+ xformers = true
+ lowram = false
+ cache_latents = true
+ cache_latents_to_disk = true
+ persistent_data_loader_workers = true
+ network_args = [ "conv_dim=8", "conv_alpha=1", "dropout=0", "algo=lokr", "factor=10", "preset=attn-mlp",]
+ optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]
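
For context, train_batch_size = 4 with gradient_accumulation_steps = 16 gives an effective batch of 64 images per optimizer step, and network_dim = 100000 follows the usual LyCORIS convention for "full dimension" when algo=lokr, so the trained parameter count is governed by factor=10 and the attn-mlp preset rather than the dim value itself. Below is a minimal sketch for sanity-checking the file before a run; it assumes Python 3.11+ (for the standard-library tomllib) and that the config sits in the working directory. Launchers such as lora-scripts normally hand a file like this to kohya's sdxl_train_network.py via --config_file, but that wiring is outside this commit.

    # Sketch: load the committed config and print a few derived values.
    # Assumes Python 3.11+ (tomllib is in the standard library from 3.11 onward)
    # and that 20240726-154113.toml is in the current directory.
    import tomllib

    with open("20240726-154113.toml", "rb") as f:
        cfg = tomllib.load(f)

    # Images seen by the optimizer per step = batch size * accumulation steps.
    effective_batch = cfg["train_batch_size"] * cfg["gradient_accumulation_steps"]
    print("effective batch size:", effective_batch)  # 4 * 16 = 64

    # LyCORIS options are passed through network_args as "key=value" strings.
    lokr = dict(arg.split("=", 1) for arg in cfg["network_args"])
    print("algo:", lokr["algo"], "factor:", lokr["factor"], "preset:", lokr["preset"])

This only inspects the TOML; it does not validate that the Windows paths above exist on the training machine.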