# a31_char.toml
[Basics]
pretrained_model_name_or_path = "/root/autodl-tmp/models/checkpoint/nn-e3_s18126.safetensors"
train_data_dir = "/root/autodl-tmp/chiikawa_char/wusaqi"
resolution = "1024"
seed = 114514
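# Both limits are set: sd-scripts recomputes max_train_steps from max_train_epochs, so the epoch cap takes precedence.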
max_train_steps = 10000
max_train_epochs = 30
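# clip_skip = 2 uses the penultimate CLIP text-encoder layer, the usual choice for anime-style base models.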
clip_skip = 2
cache_text_encoder_outputs = false
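# Tags placed before the "|||" separator in a caption are kept in place during shuffling and tag dropout.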
keep_tokens_separator = "|||"
[Save]
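# fp16 .safetensors checkpoints every 10 epochs and every 5000 steps; no resumable training state is kept (save_state = false).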
output_dir = "/root/autodl-fs/auto_lora"
output_name = "wusaqi"
save_precision = "fp16"
save_model_as = "safetensors"
save_every_n_epochs = 10
save_every_n_steps = 5000
save_state = false
save_last_n_steps_state = 0
[SDv2]
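# SD2.x / v-prediction options; all disabled for this run.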
v2 = false
v_parameterization = false
scale_v_pred_loss_like_noise_pred = false
[Network_setup]
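# Rank-8 network with alpha 4, i.e. weights are scaled by alpha/dim = 0.5. Only the U-Net is trained; the text encoder stays frozen.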
network_dim = 8
network_alpha = 4
dim_from_weights = false
network_dropout = 0
network_train_unet_only = true
network_train_text_encoder_only = false
resume = false
[LyCORIS]
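# LyCORIS backend: preset=attn-mlp applies the network to the transformer attention and MLP layers only, and algo=lora selects plain LoRA.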
network_module = "lycoris.kohya"
network_args = [ "preset=attn-mlp", "algo=lora",]
[Optimizer]
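# Effective batch size = 8 * 2 (gradient accumulation) = 16. Lion typically needs a lower
# learning rate and heavier weight decay than AdamW, hence unet_lr = 1e-4 and weight_decay = 0.1.
# text_encoder_lr has no effect while network_train_unet_only is true.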
train_batch_size = 8
gradient_checkpointing = true
gradient_accumulation_steps = 2
optimizer_type = "Lion"
unet_lr = 0.0001
text_encoder_lr = 2e-5
max_grad_norm = 1.0
optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]
[Lr_scheduler]
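# Flat learning rate with no warmup; the cycle and power settings only apply to other scheduler types.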
lr_scheduler_type = ""
lr_scheduler = "constant"
lr_warmup_steps = 0
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1.0
[Training_precision]
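# fp16 mixed precision; full_fp16 would additionally hold gradients in fp16 (less VRAM, less numerically stable).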
mixed_precision = "fp16"
full_fp16 = false
full_bf16 = false
[Further_improvement]
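# min_snr_gamma = 0 leaves Min-SNR loss weighting off. The small noise offset helps the model reproduce very dark and very bright images.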
min_snr_gamma = 0
noise_offset = 0.0357
[ARB]
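# Aspect Ratio Bucketing: images are grouped into resolution buckets in 64 px steps rather than cropped to a square.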
enable_bucket = true
min_bucket_reso = 384
max_bucket_reso = 2176
bucket_reso_steps = 64
bucket_no_upscale = false
[Captions]
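# Tags are shuffled with the first tag pinned (keep_tokens = 1); 5% of captions are dropped
# whole, and each tag has an additional 5% dropout. max_token_length = 225 extends the default
# 75-token CLIP window.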
shuffle_caption = true
caption_extension = ".txt"
keep_tokens = 1
caption_dropout_rate = 0.05
caption_dropout_every_n_epochs = 0
caption_tag_dropout_rate = 0.05
max_token_length = 225
weighted_captions = false
token_warmup_min = 1
token_warmup_step = 0
[Attention]
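# Use xformers for memory-efficient attention.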
mem_eff_attn = false
xformers = true
[Data_augmentation]
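# color_aug and random_crop must stay false while cache_latents is enabled, since cached latents cannot be re-augmented.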
color_aug = false
flip_aug = false
random_crop = false
[Cache_latents]
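# Images are VAE-encoded once and the latents cached to disk, so the VAE is not run on every step.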
cache_latents = true
vae_batch_size = 4
cache_latents_to_disk = true
[Sampling_during_training]
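# Sampler for preview images; it only takes effect if sample_every_n_steps/epochs and sample_prompts are also set.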
sample_sampler = "ddim"
[Logging]
logging_dir = "logs_training"
log_with = "tensorboard"
log_prefix = "lora_"
wandb_api_key = ""
[Dataset]
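# Eight dataloader workers, kept alive across epochs to avoid respawn overhead.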
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
dataset_repeats = 1
[Regularization]
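# Weight on the prior-preservation loss; only relevant when a regularization image set is provided.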
prior_loss_weight = 1.0
[Huggingface]
save_state_to_huggingface = false
resume_from_huggingface = false
async_upload = false
[Debugging]
debug_dataset = false
[Deprecated]
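# Superseded switches: optimizer_type replaces use_8bit_adam / use_lion_optimizer, and
# learning_rate serves only as a fallback when unet_lr / text_encoder_lr are unset.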
use_8bit_adam = false
use_lion_optimizer = false
learning_rate = 0.0005
[Others]
lowram = false
training_comment = "neta_lora"