# V1 sample images: https://civitai.com/posts/12553354
# Sample images for V1.
# The following settings result in the large file size.
# LoRA network settings (networks.lora module).
# network_dim = 128 is the main driver of the large file size noted above;
# network_alpha = 16 scales the effective update strength (alpha / dim = 0.125).
# unet_lr / text_encoder_lr are 1.0 because Prodigy (see [optimizer_arguments])
# adapts the step size itself.
[additional_network_arguments]
unet_lr = 1.0
text_encoder_lr = 1.0
network_dim = 128
network_alpha = 16
network_module = "networks.lora"
# Optimizer settings. Prodigy estimates the learning rate adaptively,
# so the nominal rate is the recommended 1.0 rather than a small
# Adam-style value.
[optimizer_arguments]
learning_rate = 1.0
lr_scheduler = "cosine"
lr_warmup_steps = 0
optimizer_type = "Prodigy"
# Extra options forwarded to the optimizer as "key=value" strings.
# NOTE(review): "betas=0.9,0.99" relies on the consumer splitting the
# value on the comma — confirm the training script parses it this way.
optimizer_args = [ "decouple=True", "weight_decay=0.5", "betas=0.9,0.99", "use_bias_correction=False",]
[training_arguments]
# max_train_steps = 0 — presumably disables the step cap so that
# max_train_epochs governs training length; TODO confirm against the
# training script's handling of 0.
max_train_steps = 0
max_train_epochs = 52
sample_sampler = "euler_a"
train_batch_size = 5
noise_offset = 0.03
clip_skip = 1
weighted_captions = false
# Extended token length (default is 75/77).
max_token_length = 225
save_precision = "bf16"
mixed_precision = "bf16"
# NOTE(review): xformers and sdpa are both enabled — these attention
# backends are typically mutually exclusive; confirm which one the
# script actually uses and drop the other.
xformers = true
sdpa = true
# Keep the VAE in full precision to avoid NaNs under mixed precision.
no_half_vae = true
gradient_checkpointing = true
gradient_accumulation_steps = 1
[advanced_training_config]
# Multi-resolution (pyramid) noise; used together with the
# noise_offset set in [training_arguments].
multires_noise_iterations = 6
multires_noise_discount = 0.3
# Min-SNR loss weighting; 5.0 is the value recommended in the
# Min-SNR weighting paper.
min_snr_gamma = 5.0
# No model overrides — model/checkpoint paths are presumably supplied
# on the command line; the empty table is kept for structure.
[model_arguments]

[dreambooth_arguments]
prior_loss_weight = 1.0

[dataset_arguments]
# Cache VAE latents to speed up training (incompatible with on-the-fly
# augmentations that alter pixels).
cache_latents = true
# ---------------------------------------------------------------------------
# NOTE(review): everything below was a verbatim duplicate of the
# configuration above (the same table headers repeated, minus the
# [additional_network_arguments] line). TOML 1.0 forbids defining the
# same table twice, so the duplicate is commented out to keep the file
# parseable. Delete this region once confirmed it is not needed.
# ---------------------------------------------------------------------------
# sample images for V1.
# the following results in the large file size
# [optimizer_arguments]
# learning_rate = 1.0
# lr_scheduler = "cosine"
# lr_warmup_steps = 0
# optimizer_type = "Prodigy"
# optimizer_args = [ "decouple=True", "weight_decay=0.5", "betas=0.9,0.99", "use_bias_correction=False",]
# [training_arguments]
# max_train_steps = 0
# max_train_epochs = 52
# sample_sampler = "euler_a"
# train_batch_size = 5
# noise_offset = 0.03
# clip_skip = 1
# weighted_captions = false
# max_token_length = 225
# save_precision = "bf16"
# mixed_precision = "bf16"
# xformers = true
# sdpa = true
# no_half_vae = true
# gradient_checkpointing = true
# gradient_accumulation_steps = 1
# [advanced_training_config]
# multires_noise_iterations = 6
# multires_noise_discount = 0.3
# min_snr_gamma = 5.0
# [model_arguments]
# [dreambooth_arguments]
# prior_loss_weight = 1.0
# [dataset_arguments]
# cache_latents = true