Use "mina ashido" in your prompt. This LoRA was trained with this Colab notebook: https://colab.research.google.com/drive/1bFX0pZczeApeFadrz1AdOb5TDdet2U0Z?usp=sharing
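For example, in the AUTOMATIC1111 webui (the output_dir below writes straight into its lora folder), a prompt could look like the line below. The tags after the trigger word and the 0.8 weight are only illustrative starting points, not trained values:

<lora:minaashido:0.8>, mina ashido, 1girl, solo, smile

The full training command: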
!accelerate launch \
--num_cpu_threads_per_process 8 train_network.py \
--network_module=networks.lora \
--pretrained_model_name_or_path="/content/model/nai.ckpt" \
--train_data_dir="/content/drive/MyDrive/sd/dataset/sincos" \
--reg_data_dir="/content/drive/MyDrive/sd/dataset/sincos_reg" \
--output_dir="/content/drive/MyDrive/sd/stable-diffusion-webui/models/lora/" \
--caption_extension=".txt" \
--prior_loss_weight=1 \
--resolution=768 \
--enable_bucket \
--min_bucket_reso=512 \
--max_bucket_reso=1152 \
--train_batch_size=3 \
--learning_rate=1e-4 \
--unet_lr=1.5e-4 \
--text_encoder_lr=1.5e-5 \
--mixed_precision="fp16" \
--save_precision="fp16" \
--use_8bit_adam \
--xformers \
--max_train_epochs=8 \
--save_every_n_epochs=1 \
--save_model_as=safetensors \
--clip_skip=2 \
--seed=23 \
--network_dim=128 \
--network_alpha=128 \
--max_token_length=225 \
--cache_latents \
--lr_scheduler="cosine_with_restarts" \
--output_name="minaashido" \
--shuffle_caption
# Flags left commented out for this run (a commented line must not sit
# mid-command, or it breaks the backslash continuation):
# --max_train_steps=8010
# --keep_tokens=1
# --lr_warmup_steps=$lr_warmup_steps
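Note that train_network.py expects DreamBooth-style folders: each directory under train_data_dir and reg_data_dir is named <repeats>_<name>, with the images and their matching .txt captions inside. A sketch of the layout this command assumes; the repeat counts and folder names here are examples, not the exact ones used:

/content/drive/MyDrive/sd/dataset/sincos/
└── 10_sincos/          # 10 repeats of each image per epoch
    ├── img001.png
    ├── img001.txt      # caption file, per caption_extension=".txt"
    └── ...
/content/drive/MyDrive/sd/dataset/sincos_reg/
└── 1_sincos/           # regularization images, weighted by prior_loss_weight
    └── ...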