# config_sample/loha_01/config_file.toml
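# Per-dataset settings (kohya-ss sd-scripts dataset config). The bucket
# resolution bounds below constrain aspect-ratio bucketing; caption dropout is
# fully disabled, and neither flip nor color augmentation is applied.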
[[datasets]]
min_bucket_reso = 256
max_bucket_reso = 1024
caption_dropout_rate = 0
caption_tag_dropout_rate = 0
caption_dropout_every_n_epochs = 0
flip_aug = false
color_aug = false
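
# Image subset: num_repeats = 1 uses each image once per epoch, and
# is_reg = true marks the subset as regularization (prior-preservation) images.
# No image_dir is given here, so the path is presumably injected by the
# notebook or launcher that consumes this sample config.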
[[datasets.subsets]]
num_repeats = 1
is_reg = true
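
# Global dataset options. Aspect-ratio bucketing lets images of different
# shapes share a batch; bucket sides snap to multiples of bucket_reso_steps.
# keep_tokens is only relevant when captions are shuffled or tags dropped,
# neither of which is enabled here.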
[general]
enable_bucket = true
caption_extension = ".txt"
shuffle_caption = false
keep_tokens = 0
bucket_reso_steps = 64
bucket_no_upscale = false
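
# Base model flags: v2 and v_parameterization both false means a Stable
# Diffusion 1.x checkpoint; an external VAE file is used for latent encoding.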
[model_arguments]
v2 = false
v_parameterization = false
vae = "/content/VAE/VAE84EMA.vae.pt"
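
# LyCORIS network: network_module "lycoris.kohya" with algo=loha trains a LoHa
# (low-rank Hadamard product) network rather than a plain LoRA. network_dim and
# network_alpha set the rank and scaling for attention layers; conv_dim and
# conv_alpha do the same for convolution layers. The learning rates of 1.0 are
# deliberate: D-Adaptation (below) chooses the effective step size itself.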
[additional_network_arguments]
no_metadata = false
unet_lr = 1.0
text_encoder_lr = 1.0
network_module = "lycoris.kohya"
network_dim = 32
network_alpha = 16
network_args = ["conv_dim=1", "conv_alpha=1", "algo=loha"]
network_train_unet_only = false
network_train_text_encoder_only = false
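
# Optimizer: D-Adaptation adapts the learning rate automatically, so
# learning_rate = 1 is the conventional setting. decouple=True selects
# AdamW-style decoupled weight decay, and min_snr_gamma = 5 enables Min-SNR
# loss weighting at its commonly recommended gamma.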
[optimizer_arguments]
min_snr_gamma = 5
optimizer_type = "DAdaptation"
learning_rate = 1
max_grad_norm = 1.0
optimizer_args = ["decouple=True", "weight_decay=0.4", "betas=0.9,0.99", "use_bias_correction=True", "growth_rate=1.02"]
lr_scheduler = "cosine"
lr_warmup_steps = 0
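
# Latents are encoded through the VAE once and cached; note that cache_latents
# requires color_aug = false (as set above).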
[dataset_arguments]
cache_latents = true
debug_dataset = false
vae_batch_size = 1
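
# Training loop: fp16 mixed precision with xformers attention.
# max_token_length = 225 raises the CLIP prompt limit, and clip_skip = 2 uses
# the penultimate text-encoder layer, the usual choice for anime-style bases.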
[training_arguments]
save_precision = "fp16"
save_every_n_epochs = 10
train_batch_size = 6
max_token_length = 225
mem_eff_attn = false
xformers = true
max_train_epochs = 140
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
gradient_checkpointing = false
gradient_accumulation_steps = 1
mixed_precision = "fp16"
clip_skip = 2
logging_dir = "/content/LoRA/log"
noise_offset = 0.01
lowram = false
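
# Preview images are generated every 10 epochs with the DPM2 ancestral
# sampler; the prompt file itself is presumably passed in by the launcher,
# since sample_prompts is not set here.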
[sample_prompt_arguments]
sample_every_n_epochs = 10
sample_sampler = "dpm_2_a"
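
# prior_loss_weight scales the loss on regularization images (DreamBooth-style
# prior preservation).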
[dreambooth_arguments]
prior_loss_weight = 1.0
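
# Checkpoints are written in safetensors format.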
[saving_arguments]
save_model_as = "safetensors"