From 8ab9b5b07dc05325b267e2cd8912b1315f7b0f17 Mon Sep 17 00:00:00 2001
From: PromeAIpro
Date: Fri, 30 Aug 2024 01:55:22 +0000
Subject: [PATCH 01/40] add train flux-controlnet scripts in example.

---
 examples/controlnet/README_flux.md           |  177 +++
 examples/controlnet/requirements_flux.txt    |    9 +
 examples/controlnet/train_controlnet_flux.py | 1322 ++++++++++++++++++
 3 files changed, 1508 insertions(+)
 create mode 100644 examples/controlnet/README_flux.md
 create mode 100644 examples/controlnet/requirements_flux.txt
 create mode 100644 examples/controlnet/train_controlnet_flux.py

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
new file mode 100644
index 000000000000..a7c82eef583e
--- /dev/null
+++ b/examples/controlnet/README_flux.md
@@ -0,0 +1,177 @@
+# ControlNet training example for FLUX
+
+The `train_controlnet_flux.py` script shows how to implement the ControlNet training procedure and adapt it for [FLUX](https://github.com/black-forest-labs/flux).
+
+Training script provided by LibAI, which is an institution dedicated to the progress and achievement of artificial general intelligence.LibAI is the developer of [cutout.pro](https://www.cutout.pro/) and [promeai.pro](https://www.promeai.pro/).
+
+## Running locally with PyTorch
+
+### Installing the dependencies
+
+Before running the scripts, make sure to install the library's training dependencies:
+
+**Important**
+
+To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
+
+```bash
+git clone https://github.com/huggingface/diffusers
+cd diffusers
+pip install -e .
+```
+
+Then cd into the `examples/controlnet` folder and run
+```bash
+pip install -r requirements_flux.txt
+```
+
+And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
+
+```bash
+accelerate config
+```
+
+Or, for a default accelerate configuration without answering questions about your environment:
+
+```bash
+accelerate config default
+```
+
+Or, if your environment doesn't support an interactive shell (e.g., a notebook):
+
+```python
+from accelerate.utils import write_basic_config
+write_basic_config()
+```
+
+When running `accelerate config`, setting torch compile mode to True can give dramatic speedups.
+
+## Custom Datasets
+
+We support importing data from a JSONL file (e.g. `xxx.jsonl`), with one JSON object per line. Here is a brief example (see the sketch under Training below for one way to generate such a file):
+```json
+{"image_path": "xxx", "caption": "xxx", "control_path": "xxx"}
+{"image_path": "xxx", "caption": "xxx", "control_path": "xxx"}
+```
+
+## Training
+
+Our training examples use two test conditioning images. They can be downloaded by running
+
+```sh
+wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
+wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
+```
+
+Then run `huggingface-cli login` to log into your Hugging Face account. This is needed to push the trained ControlNet parameters to the Hugging Face Hub.
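+
+If you still need to assemble the JSONL file described under Custom Datasets above, a minimal sketch is shown below; the file names, paths, and captions are placeholders, so substitute your own data. Pass the resulting file to the script via `--jsonl_for_train`:
+
+```python
+import json
+
+# Placeholder records: point these at your own target images, captions, and conditioning images.
+records = [
+    {"image_path": "images/0001.png", "caption": "red circle with blue background", "control_path": "conditioning/0001.png"},
+    {"image_path": "images/0002.png", "caption": "cyan circle with brown floral background", "control_path": "conditioning/0002.png"},
+]
+
+# Write one JSON object per line, as expected by `--jsonl_for_train`.
+with open("train.jsonl", "w") as f:
+    for record in records:
+        f.write(json.dumps(record) + "\n")
+```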
+
+We can define `num_layers` and `num_single_layers`, which determine the size of the ControlNet (the default values are `num_layers=4` and `num_single_layers=10`).
+
+```bash
+export MODEL_DIR="black-forest-labs/FLUX.1-dev"
+export OUTPUT_DIR="path to save model"
+export TRAIN_JSON_FILE="path to your jsonl file"
+
+accelerate launch train_controlnet_flux.py \
+    --pretrained_model_name_or_path=$MODEL_DIR \
+    --conditioning_image_column=control_path \
+    --image_column=image_path \
+    --caption_column=caption \
+    --output_dir=$OUTPUT_DIR \
+    --jsonl_for_train=$TRAIN_JSON_FILE \
+    --mixed_precision="bf16" \
+    --resolution=512 \
+    --learning_rate=1e-5 \
+    --max_train_steps=15000 \
+    --validation_steps=100 \
+    --checkpointing_steps=200 \
+    --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
+    --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
+    --train_batch_size=1 \
+    --gradient_accumulation_steps=4 \
+    --report_to="tensorboard" \
+    --num_double_layers=4 \
+    --num_single_layers=0 \
+    --seed=42
+```
+
+To better track our training experiments, we're using the following flags in the command above:
+
+* `report_to="wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
+* `validation_image`, `validation_prompt`, and `validation_steps` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
+
+Our experiments were conducted on a single 40GB A100 GPU.
+
+### Inference
+
+Once training is done, we can perform inference like so:
+
+```python
+import torch
+from diffusers.utils import load_image
+from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline
+from diffusers.models.controlnet_flux import FluxControlNetModel

+base_model = 'black-forest-labs/FLUX.1-dev'
+controlnet_model = 'path to controlnet'
+controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
+pipe = FluxControlNetPipeline.from_pretrained(base_model,
+                                              controlnet=controlnet,
+                                              torch_dtype=torch.bfloat16)
+pipe.to("cuda")
+
+control_image = load_image("./conditioning_image_1.png").resize((1024, 1024))
+prompt = "pale golden rod circle with old lace background"
+
+image = pipe(
+    prompt,
+    control_image=control_image,
+    controlnet_conditioning_scale=0.6,
+    num_inference_steps=28,
+    guidance_scale=3.5,
+).images[0]
+image.save("./output.png")
+```
+
+## Notes
+
+### T5 does not support bf16 autocast (cause unknown) and will produce black images.
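+
+In our runs, wrapping validation inference in an autocast context caused the T5 text encoder to produce degenerate embeddings under bf16, which decode to black images. Until the root cause is understood, the script simply disables autocast during validation: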
+
+```diff
+if is_final_validation or torch.backends.mps.is_available():
+    autocast_ctx = nullcontext()
+else:
+    # t5 seems not to support autocast; cause unknown
++    autocast_ctx = nullcontext()
+-    autocast_ctx = torch.autocast(accelerator.device.type)
+```
+
+### To fix the following error:
+
+```bash
+RuntimeError: mat1 and mat2 must have the same dtype, but got Float and BFloat16
+```
+
+#### We need to change some code in `diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py` to ensure consistent dtypes:
+
+```diff
+noise_pred = self.transformer(
+    hidden_states=latents,
+    # YiYi notes: divide it by 1000 for now because we scale it by 1000 in the transforme rmodel (we should not keep it but I want to keep the inputs same for the model for testing)
+    timestep=timestep / 1000,
+    guidance=guidance,
+    pooled_projections=pooled_prompt_embeds,
+    encoder_hidden_states=prompt_embeds,
+-    controlnet_block_samples=controlnet_block_samples,
+-    controlnet_single_block_samples=controlnet_single_block_samples,
++    controlnet_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_block_samples] if controlnet_block_samples is not None else None,
++    controlnet_single_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples] if controlnet_single_block_samples is not None else None,
+    txt_ids=text_ids,
+    img_ids=latent_image_ids,
+    joint_attention_kwargs=self.joint_attention_kwargs,
+    return_dict=False,
+)[0]
+```
\ No newline at end of file
diff --git a/examples/controlnet/requirements_flux.txt b/examples/controlnet/requirements_flux.txt
new file mode 100644
index 000000000000..388444fbc65b
--- /dev/null
+++ b/examples/controlnet/requirements_flux.txt
@@ -0,0 +1,9 @@
+accelerate>=0.16.0
+torchvision
+transformers>=4.25.1
+ftfy
+tensorboard
+Jinja2
+datasets
+wandb
+SentencePiece
\ No newline at end of file
diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
new file mode 100644
index 000000000000..bc430dd13223
--- /dev/null
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -0,0 +1,1322 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import functools
+import gc
+import logging
+import math
+import os
+import random
+import shutil
+from contextlib import nullcontext
+from pathlib import Path
+
+import accelerate
+import numpy as np
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+import transformers
+from accelerate import Accelerator
+from accelerate.logging import get_logger
+from accelerate.utils import DistributedType, ProjectConfiguration, set_seed
+from datasets import load_dataset
+from huggingface_hub import create_repo
+from packaging import version
+from PIL import Image
+from torchvision import transforms
+from tqdm.auto import tqdm
+from transformers import (
+    AutoTokenizer,
+    CLIPTextModel,
+    T5EncoderModel,
+)
+
+import diffusers
+from diffusers import (
+    AutoencoderKL,
+    FlowMatchEulerDiscreteScheduler,
+    FluxTransformer2DModel,
+)
+from diffusers.models.controlnet_flux import FluxControlNetModel
+from diffusers.optimization import get_scheduler
+from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline
+from diffusers.utils import check_min_version, is_wandb_available
+from diffusers.utils.import_utils import is_torch_npu_available, is_xformers_available
+from diffusers.utils.torch_utils import is_compiled_module
+
+
+if is_wandb_available():
+    import wandb
+
+# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
+check_min_version("0.31.0.dev0")
+
+logger = get_logger(__name__)
+if is_torch_npu_available():
+    torch.npu.config.allow_internal_format = False
+
+
+def log_validation(
+    vae, flux_transformer, flux_controlnet, args, accelerator, weight_dtype, step, is_final_validation=False
+):
+    logger.info("Running validation... ")
+
+    if not is_final_validation:
+        flux_controlnet = accelerator.unwrap_model(flux_controlnet)
+        pipeline = FluxControlNetPipeline.from_pretrained(
+            args.pretrained_model_name_or_path,
+            controlnet=flux_controlnet,
+            transformer=flux_transformer,
+            torch_dtype=torch.bfloat16,
+        )
+    else:
+        flux_controlnet = FluxControlNetModel.from_pretrained(args.output_dir, torch_dtype=torch.bfloat16)
+        pipeline = FluxControlNetPipeline.from_pretrained(
+            args.pretrained_model_name_or_path,
+            controlnet=flux_controlnet,
+            transformer=flux_transformer,
+            torch_dtype=torch.bfloat16,
+        )
+
+    # pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipeline.scheduler.config)
+    pipeline = pipeline.to(accelerator.device)
+    pipeline.set_progress_bar_config(disable=True)
+
+    if args.enable_xformers_memory_efficient_attention:
+        pipeline.enable_xformers_memory_efficient_attention()
+
+    if args.seed is None:
+        generator = None
+    else:
+        generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
+
+    if len(args.validation_image) == len(args.validation_prompt):
+        validation_images = args.validation_image
+        validation_prompts = args.validation_prompt
+    elif len(args.validation_image) == 1:
+        validation_images = args.validation_image * len(args.validation_prompt)
+        validation_prompts = args.validation_prompt
+    elif len(args.validation_prompt) == 1:
+        validation_images = args.validation_image
+        validation_prompts = args.validation_prompt * len(args.validation_image)
+    else:
+        raise ValueError(
+            "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`"
+        )
+
+    image_logs = []
+    if is_final_validation or torch.backends.mps.is_available():
+        autocast_ctx = nullcontext()
+    else:
+        # t5 seems not to support autocast; cause unknown
+        autocast_ctx = nullcontext()
+        # autocast_ctx = torch.autocast(accelerator.device.type)
+
+    for validation_prompt, validation_image in zip(validation_prompts, validation_images):
+        from diffusers.utils import load_image
+
+        validation_image = load_image(validation_image)
+        # inference at 1024x1024 is needed to get a good image
+        validation_image = validation_image.resize((1024, 1024))
+        # validation_image = validation_image.resize((args.resolution, args.resolution))
+
+        images = []
+
+        for _ in range(args.num_validation_images):
+            with autocast_ctx:
+                # requires the dtype fix in pipeline_flux_controlnet described in the README notes
+                image = pipeline(
+                    prompt=validation_prompt,
+                    control_image=validation_image,
+                    num_inference_steps=28,
+                    controlnet_conditioning_scale=0.7,
+                    guidance_scale=3.5,
+                    generator=generator,
+                ).images[0]
+            images.append(image)
+        image_logs.append(
+            {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}
+        )
+
+    tracker_key = "test" if is_final_validation else "validation"
+    for tracker in accelerator.trackers:
+        if tracker.name == "tensorboard":
+            for log in image_logs:
+                images = log["images"]
+                validation_prompt = log["validation_prompt"]
+                validation_image = log["validation_image"]
+
+                formatted_images = []
+
+                formatted_images.append(np.asarray(validation_image))
+
+                for image in images:
+                    formatted_images.append(np.asarray(image))
+
+                formatted_images = np.stack(formatted_images)
+
+                tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC")
+        elif tracker.name == "wandb":
+            formatted_images = []
+
+            for log in image_logs:
+                images = log["images"]
+                validation_prompt = log["validation_prompt"]
+                validation_image = log["validation_image"]
+
+                formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning"))
+
+                for image in images:
+                    image = wandb.Image(image, caption=validation_prompt)
+                    formatted_images.append(image)
+
+            tracker.log({tracker_key: formatted_images})
+        else:
+            logger.warning(f"image logging not implemented for {tracker.name}")
+
+    del pipeline
+    gc.collect()
+    torch.cuda.empty_cache()
+
+    return image_logs
+
+
+def parse_args(input_args=None):
+    parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.")
+    parser.add_argument(
+        "--pretrained_model_name_or_path",
+        type=str,
+        default=None,
+        required=True,
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+    )
+    parser.add_argument(
+        "--pretrained_vae_model_name_or_path",
+        type=str,
+        default=None,
+        help="Path to an improved VAE to stabilize training. For more details check out: https://github.com/huggingface/diffusers/pull/4038.",
+    )
+    parser.add_argument(
+        "--flux_controlnet_model_name_or_path",
+        type=str,
+        default=None,
+        help="Path to pretrained controlnet model or model identifier from huggingface.co/models."
+        " If not specified, controlnet weights are initialized from the transformer.",
+    )
+    parser.add_argument(
+        "--variant",
+        type=str,
+        default=None,
+        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, e.g. 'fp16'",
+    )
+    parser.add_argument(
+        "--revision",
+        type=str,
+        default=None,
+        required=False,
+        help="Revision of pretrained model identifier from huggingface.co/models.",
+    )
+    parser.add_argument(
+        "--tokenizer_name",
+        type=str,
+        default=None,
+        help="Pretrained tokenizer name or path if not the same as model_name",
+    )
+    parser.add_argument(
+        "--output_dir",
+        type=str,
+        default="controlnet-model",
+        help="The output directory where the model predictions and checkpoints will be written.",
+    )
+    parser.add_argument(
+        "--cache_dir",
+        type=str,
+        default=None,
+        help="The directory where the downloaded models and datasets will be stored.",
+    )
+    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
+    parser.add_argument(
+        "--resolution",
+        type=int,
+        default=512,
+        help=(
+            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
+            " resolution"
+        ),
+    )
+    parser.add_argument(
+        "--crops_coords_top_left_h",
+        type=int,
+        default=0,
+        help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
+    )
+    parser.add_argument(
+        "--crops_coords_top_left_w",
+        type=int,
+        default=0,
+        help=("Coordinate for (the width) to be included in the crop coordinate embeddings needed by SDXL UNet."),
+    )
+    parser.add_argument(
+        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
+    )
+    parser.add_argument("--num_train_epochs", type=int, default=1)
+    parser.add_argument(
+        "--max_train_steps",
+        type=int,
+        default=None,
+        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
+    )
+    parser.add_argument(
+        "--checkpointing_steps",
+        type=int,
+        default=500,
+        help=(
+            "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
+            "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
+            " Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
+            " See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step-by-step"
+            " instructions."
+        ),
+    )
+    parser.add_argument(
+        "--checkpoints_total_limit",
+        type=int,
+        default=None,
+        help=("Max number of checkpoints to store."),
+    )
+    parser.add_argument(
+        "--resume_from_checkpoint",
+        type=str,
+        default=None,
+        help=(
+            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
+            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
+        ),
+    )
+    parser.add_argument(
+        "--gradient_accumulation_steps",
+        type=int,
+        default=1,
+        help="Number of update steps to accumulate before performing a backward/update pass.",
+    )
+    parser.add_argument(
+        "--gradient_checkpointing",
+        action="store_true",
+        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
+    )
+    parser.add_argument(
+        "--learning_rate",
+        type=float,
+        default=5e-6,
+        help="Initial learning rate (after the potential warmup period) to use.",
+    )
+    parser.add_argument(
+        "--scale_lr",
+        action="store_true",
+        default=False,
+        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
+    )
+    parser.add_argument(
+        "--lr_scheduler",
+        type=str,
+        default="constant",
+        help=(
+            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
+            ' "constant", "constant_with_warmup"]'
+        ),
+    )
+    parser.add_argument(
+        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
+    )
+    parser.add_argument(
+        "--lr_num_cycles",
+        type=int,
+        default=1,
+        help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
+    )
+    parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
+    parser.add_argument(
+        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
+    )
+    parser.add_argument(
+        "--use_adafactor", action="store_true", help="Whether or not to use the Adafactor optimizer to save GPU memory."
+    )
+    parser.add_argument(
+        "--dataloader_num_workers",
+        type=int,
+        default=0,
+        help=(
+            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
+        ),
+    )
+    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
+    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
+    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
+    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
+    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
+    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
+    parser.add_argument(
+        "--hub_model_id",
+        type=str,
+        default=None,
+        help="The name of the repository to keep in sync with the local `output_dir`.",
+    )
+    parser.add_argument(
+        "--logging_dir",
+        type=str,
+        default="logs",
+        help=(
+            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
+            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
+        ),
+    )
+    parser.add_argument(
+        "--allow_tf32",
+        action="store_true",
+        help=(
+            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
+            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
+        ),
+    )
+    parser.add_argument(
+        "--report_to",
+        type=str,
+        default="tensorboard",
+        help=(
+            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
+            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
+        ),
+    )
+    parser.add_argument(
+        "--mixed_precision",
+        type=str,
+        default=None,
+        choices=["no", "fp16", "bf16"],
+        help=(
+            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
+            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
+            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
+        ),
+    )
+    parser.add_argument(
+        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
+    )
+    parser.add_argument(
+        "--enable_npu_flash_attention", action="store_true", help="Whether or not to use npu flash attention."
+    )
+    parser.add_argument(
+        "--set_grads_to_none",
+        action="store_true",
+        help=(
+            "Save more memory by setting grads to None instead of zero. Be aware that this changes certain"
+            " behaviors, so disable this argument if it causes any problems. More info:"
+            " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
+        ),
+    )
+    parser.add_argument(
+        "--dataset_name",
+        type=str,
+        default=None,
+        help=(
+            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
+            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
+            " or to a folder containing files that 🤗 Datasets can understand."
+        ),
+    )
+    parser.add_argument(
+        "--dataset_config_name",
+        type=str,
+        default=None,
+        help="The config of the Dataset, leave as None if there's only one config.",
+    )
+    parser.add_argument(
+        "--train_data_dir",
+        type=str,
+        default=None,
+        help=(
+            "A folder containing the training data. Folder contents must follow the structure described in"
+            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
+            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
+        ),
+    )
+    parser.add_argument(
+        "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
+    )
+    parser.add_argument(
+        "--conditioning_image_column",
+        type=str,
+        default="conditioning_image",
+        help="The column of the dataset containing the controlnet conditioning image.",
+    )
+    parser.add_argument(
+        "--caption_column",
+        type=str,
+        default="text",
+        help="The column of the dataset containing a caption or a list of captions.",
+    )
+    parser.add_argument(
+        "--max_train_samples",
+        type=int,
+        default=None,
+        help=(
+            "For debugging purposes or quicker training, truncate the number of training examples to this "
+            "value if set."
+        ),
+    )
+    parser.add_argument(
+        "--proportion_empty_prompts",
+        type=float,
+        default=0,
+        help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
+    )
+    parser.add_argument(
+        "--validation_prompt",
+        type=str,
+        default=None,
+        nargs="+",
+        help=(
+            "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
+            " Provide either a matching number of `--validation_image`s, a single `--validation_image`"
+            " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
+        ),
+    )
+    parser.add_argument(
+        "--validation_image",
+        type=str,
+        default=None,
+        nargs="+",
+        help=(
+            "A set of paths to the controlnet conditioning images to be evaluated every `--validation_steps`"
+            " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a"
+            " single `--validation_prompt` to be used with all `--validation_image`s, or a single"
+            " `--validation_image` that will be used with all `--validation_prompt`s."
+        ),
+    )
+    parser.add_argument(
+        "--num_double_layers",
+        type=int,
+        default=4,
+        help="Number of double layers in the controlnet (default: 4).",
+    )
+    parser.add_argument(
+        "--num_single_layers",
+        type=int,
+        default=4,
+        help="Number of single layers in the controlnet (default: 4).",
+    )
+    parser.add_argument(
+        "--num_validation_images",
+        type=int,
+        default=2,
+        help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair",
+    )
+    parser.add_argument(
+        "--validation_steps",
+        type=int,
+        default=100,
+        help=(
+            "Run validation every X steps. Validation consists of running each `args.validation_prompt`"
+            " `args.num_validation_images` times and logging the resulting images."
+        ),
+    )
+    parser.add_argument(
+        "--tracker_project_name",
+        type=str,
+        default="sd_xl_train_controlnet",
+        help=(
+            "The `project_name` argument passed to `Accelerator.init_trackers`. For more information, see"
+            " https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
+        ),
+    )
+    parser.add_argument(
+        "--jsonl_for_train",
+        type=str,
+        default=None,
+        help="Path to the jsonl file containing the training data.",
+    )
+
+    if input_args is not None:
+        args = parser.parse_args(input_args)
+    else:
+        args = parser.parse_args()
+
+    # if args.dataset_name is None and args.train_data_dir is None:
+    #     raise ValueError("Specify either `--dataset_name` or `--train_data_dir`")
+
+    # if args.dataset_name is not None and args.train_data_dir is not None:
+    #     raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`")
+
+    if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
+        raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")
+
+    if args.validation_prompt is not None and args.validation_image is None:
+        raise ValueError("`--validation_image` must be set if `--validation_prompt` is set")
+
+    if args.validation_prompt is None and args.validation_image is not None:
+        raise ValueError("`--validation_prompt` must be set if `--validation_image` is set")
+
+    if (
+        args.validation_image is not None
+        and args.validation_prompt is not None
+        and len(args.validation_image) != 1
+        and len(args.validation_prompt) != 1
+        and len(args.validation_image) != len(args.validation_prompt)
+    ):
+        raise ValueError(
+            "Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
+            " or the same number of `--validation_prompt`s and `--validation_image`s"
+        )
+
+    if args.resolution % 8 != 0:
+        raise ValueError(
+            "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the controlnet encoder."
+        )
+
+    return args
+
+
+def get_train_dataset(args, accelerator):
+    # load from json
+    dataset = load_dataset("json", data_files=args.jsonl_for_train, cache_dir=args.cache_dir)
+    dataset = dataset.flatten_indices()
+    # Preprocessing the datasets.
+    # We need to tokenize inputs and targets.
+    column_names = dataset["train"].column_names
+
+    # Get the column names for input/target.
+    if args.image_column is None:
+        image_column = column_names[0]
+        logger.info(f"image column defaulting to {image_column}")
+    else:
+        image_column = args.image_column
+        if image_column not in column_names:
+            raise ValueError(
+                f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
+            )
+
+    if args.caption_column is None:
+        caption_column = column_names[1]
+        logger.info(f"caption column defaulting to {caption_column}")
+    else:
+        caption_column = args.caption_column
+        if caption_column not in column_names:
+            raise ValueError(
+                f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
+            )
+
+    if args.conditioning_image_column is None:
+        conditioning_image_column = column_names[2]
+        logger.info(f"conditioning image column defaulting to {conditioning_image_column}")
+    else:
+        conditioning_image_column = args.conditioning_image_column
+        if conditioning_image_column not in column_names:
+            raise ValueError(
+                f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
+            )
+
+    with accelerator.main_process_first():
+        train_dataset = dataset["train"].shuffle(seed=args.seed)
+        if args.max_train_samples is not None:
+            train_dataset = train_dataset.select(range(args.max_train_samples))
+    return train_dataset
+
+
+def prepare_train_dataset(dataset, accelerator):
+    image_transforms = transforms.Compose(
+        [
+            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
+            transforms.CenterCrop(args.resolution),
+            transforms.ToTensor(),
+            transforms.Normalize([0.5], [0.5]),
+        ]
+    )
+
+    conditioning_image_transforms = transforms.Compose(
+        [
+            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
+            transforms.CenterCrop(args.resolution),
+            transforms.ToTensor(),
+        ]
+    )
+
+    def preprocess_train(examples):
+        images = [
+            (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB"))
+            for image in examples[args.image_column]
+        ]
+        images = [image_transforms(image) for image in images]
+
+        conditioning_images = [
+            (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB"))
+            for image in examples[args.conditioning_image_column]
+        ]
+        conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]
+        examples["pixel_values"] = images
+        examples["conditioning_pixel_values"] = conditioning_images
+
+        return examples
+
+    with accelerator.main_process_first():
+        dataset = dataset.with_transform(preprocess_train)
+
+    return dataset
+
+
+def collate_fn(examples):
+    pixel_values = torch.stack([example["pixel_values"] for example in examples])
+    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
+
+    conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
+    conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()
+
+    prompt_ids = torch.stack([torch.tensor(example["prompt_embeds"]) for example in examples])
+
+    pooled_prompt_embeds = torch.stack([torch.tensor(example["pooled_prompt_embeds"]) for example in examples])
+    text_ids = torch.stack([torch.tensor(example["text_ids"]) for example in examples])
+
+    return {
+        "pixel_values": pixel_values,
+        "conditioning_pixel_values": conditioning_pixel_values,
+        "prompt_ids": prompt_ids,
+        "unet_added_conditions": {"pooled_prompt_embeds": pooled_prompt_embeds, "time_ids": text_ids},
+    }
+
+
+def main(args):
+    # if args.report_to == "wandb" and args.hub_token is not None:
+    #     raise ValueError(
+    #         "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
+    #         " Please use `huggingface-cli login` to authenticate with the Hub."
+    #     )
+
+    logging_out_dir = Path(args.output_dir, args.logging_dir)
+
+    if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
+        # due to pytorch#99272, MPS does not yet support bfloat16.
+        raise ValueError(
+            "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
+        )
+
+    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=str(logging_out_dir))
+
+    accelerator = Accelerator(
+        gradient_accumulation_steps=args.gradient_accumulation_steps,
+        mixed_precision=args.mixed_precision,
+        log_with=args.report_to,
+        project_config=accelerator_project_config,
+    )
+
+    # Disable AMP for MPS.
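+    # AMP autocast appears not to be supported on the MPS backend (see the bf16 check
+    # above and pytorch#99272), so we fall back to full-precision execution there.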
+    if torch.backends.mps.is_available():
+        print("MPS is enabled. Disabling AMP.")
+        accelerator.native_amp = False
+
+    # Make one log on every process with the configuration for debugging.
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        # DEBUG, INFO, WARNING, ERROR, CRITICAL
+        level=logging.INFO,
+    )
+    logger.info(accelerator.state, main_process_only=False)
+
+    if accelerator.is_local_main_process:
+        transformers.utils.logging.set_verbosity_warning()
+        diffusers.utils.logging.set_verbosity_info()
+    else:
+        transformers.utils.logging.set_verbosity_error()
+        diffusers.utils.logging.set_verbosity_error()
+
+    # If passed along, set the training seed now.
+    if args.seed is not None:
+        set_seed(args.seed)
+
+    # Handle the repository creation
+    if accelerator.is_main_process:
+        if args.output_dir is not None:
+            os.makedirs(args.output_dir, exist_ok=True)
+
+        if args.push_to_hub:
+            repo_id = create_repo(
+                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
+            ).repo_id
+
+    # Load the tokenizers
+    # load clip tokenizer
+    tokenizer_one = AutoTokenizer.from_pretrained(
+        args.pretrained_model_name_or_path,
+        subfolder="tokenizer",
+        revision=args.revision,
+        use_fast=False,
+    )
+    # load t5 tokenizer
+    tokenizer_two = AutoTokenizer.from_pretrained(
+        args.pretrained_model_name_or_path,
+        subfolder="tokenizer_2",
+        revision=args.revision,
+        use_fast=False,
+    )
+    # load clip text encoder
+    text_encoder_one = CLIPTextModel.from_pretrained(
+        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
+    )
+    # load t5 text encoder
+    text_encoder_two = T5EncoderModel.from_pretrained(
+        args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
+    )
+
+    vae = AutoencoderKL.from_pretrained(
+        args.pretrained_model_name_or_path,
+        subfolder="vae",
+        revision=args.revision,
+        variant=args.variant,
+    )
+    flux_transformer = FluxTransformer2DModel.from_pretrained(
+        args.pretrained_model_name_or_path,
+        subfolder="transformer",
+        revision=args.revision,
+        variant=args.variant,
+    )
+    if args.flux_controlnet_model_name_or_path:
+        logger.info("Loading existing controlnet weights")
+        flux_controlnet = FluxControlNetModel.from_pretrained(args.flux_controlnet_model_name_or_path)
+    else:
+        logger.info("Initializing controlnet weights from transformer")
+        # we can define num_layers and num_single_layers;
+        # the default values are num_layers=4, num_single_layers=10
+        flux_controlnet = FluxControlNetModel.from_transformer(
+            flux_transformer,
+            num_layers=args.num_double_layers,
+            num_single_layers=args.num_single_layers,
+        )
+    logger.info("all models loaded successfully")
+
+    noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
+        args.pretrained_model_name_or_path,
+        subfolder="scheduler",
+    )
+
+    vae.requires_grad_(False)
+    flux_transformer.requires_grad_(False)
+    text_encoder_one.requires_grad_(False)
+    text_encoder_two.requires_grad_(False)
+    flux_controlnet.train()
+
+    # use some pipeline functions
+    flux_controlnet_pipeline = FluxControlNetPipeline(
+        scheduler=noise_scheduler,
+        vae=vae,
+        text_encoder=text_encoder_one,
+        tokenizer=tokenizer_one,
+        text_encoder_2=text_encoder_two,
+        tokenizer_2=tokenizer_two,
+        transformer=flux_transformer,
+        controlnet=flux_controlnet,
+    ).to(accelerator.device)
+
+    def unwrap_model(model):
+        model = accelerator.unwrap_model(model)
+        model = model._orig_mod if is_compiled_module(model) else model
+        return model
+
+    # `accelerate` 0.16.0 will have better support for customized saving
+    if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
+        # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
+        def save_model_hook(models, weights, output_dir):
+            if accelerator.is_main_process:
+                i = len(weights) - 1
+
+                while len(weights) > 0:
+                    weights.pop()
+                    model = models[i]
+
+                    sub_dir = "flux_controlnet"
+                    model.save_pretrained(os.path.join(output_dir, sub_dir))
+
+                    i -= 1
+
+        def load_model_hook(models, input_dir):
+            while len(models) > 0:
+                # pop models so that they are not loaded again
+                model = models.pop()
+
+                # load diffusers style into model
+                load_model = FluxControlNetModel.from_pretrained(input_dir, subfolder="controlnet")
+                model.register_to_config(**load_model.config)
+
+                model.load_state_dict(load_model.state_dict())
+                del load_model
+
+        accelerator.register_save_state_pre_hook(save_model_hook)
+        accelerator.register_load_state_pre_hook(load_model_hook)
+
+    if args.enable_npu_flash_attention:
+        if is_torch_npu_available():
+            logger.info("npu flash attention enabled.")
+            flux_transformer.enable_npu_flash_attention()
+        else:
+            raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu devices.")
+
+    if args.enable_xformers_memory_efficient_attention:
+        if is_xformers_available():
+            import xformers
+
+            xformers_version = version.parse(xformers.__version__)
+            if xformers_version == version.parse("0.0.16"):
+                logger.warning(
+                    "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
+                )
+            flux_transformer.enable_xformers_memory_efficient_attention()
+            flux_controlnet.enable_xformers_memory_efficient_attention()
+        else:
+            raise ValueError("xformers is not available. Make sure it is installed correctly")
+
+    if args.gradient_checkpointing:
+        flux_transformer.enable_gradient_checkpointing()
+        flux_controlnet.enable_gradient_checkpointing()
+
+    # Check that all trainable models are in full precision
+    low_precision_error_string = (
+        " Please make sure to always have all model weights in full float32 precision when starting training - even if"
+        " doing mixed precision training, copy of the weights should still be float32."
+    )
+
+    if unwrap_model(flux_controlnet).dtype != torch.float32:
+        raise ValueError(
+            f"Controlnet loaded as datatype {unwrap_model(flux_controlnet).dtype}. {low_precision_error_string}"
+        )
+
+    # Enable TF32 for faster training on Ampere GPUs,
+    # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
+    if args.allow_tf32:
+        torch.backends.cuda.matmul.allow_tf32 = True
+
+    if args.scale_lr:
+        args.learning_rate = (
+            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
+        )
+
+    # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
+    if args.use_8bit_adam:
+        try:
+            import bitsandbytes as bnb
+        except ImportError:
+            raise ImportError(
+                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
+            )
+
+        optimizer_class = bnb.optim.AdamW8bit
+    else:
+        optimizer_class = torch.optim.AdamW
+
+    # Optimizer creation
+    params_to_optimize = flux_controlnet.parameters()
+    # use the Adafactor optimizer to save GPU memory
+    if args.use_adafactor:
+        from transformers import Adafactor
+
+        optimizer = Adafactor(
+            params_to_optimize,
+            lr=args.learning_rate,
+            scale_parameter=False,
+            relative_step=False,
+            # warmup_init=True,
+            weight_decay=args.adam_weight_decay,
+        )
+    else:
+        optimizer = optimizer_class(
+            params_to_optimize,
+            lr=args.learning_rate,
+            betas=(args.adam_beta1, args.adam_beta2),
+            weight_decay=args.adam_weight_decay,
+            eps=args.adam_epsilon,
+        )
+
+    # For mixed precision training we cast the text_encoder and vae weights to half-precision
+    # as these models are only used for inference; keeping weights in full precision is not required.
+    weight_dtype = torch.float32
+    if accelerator.mixed_precision == "fp16":
+        weight_dtype = torch.float16
+    elif accelerator.mixed_precision == "bf16":
+        weight_dtype = torch.bfloat16
+
+    vae.to(accelerator.device, dtype=weight_dtype)
+    flux_transformer.to(accelerator.device, dtype=weight_dtype)
+    text_encoder_one.to(accelerator.device, dtype=weight_dtype)
+    text_encoder_two.to(accelerator.device, dtype=weight_dtype)
+    # flux_controlnet.to(accelerator.device, dtype=weight_dtype)
+
+    def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline, weight_dtype, is_train=True):
+        prompt_batch = batch[args.caption_column]
+        captions = []
+        for caption in prompt_batch:
+            if random.random() < proportion_empty_prompts:
+                captions.append("")
+            elif isinstance(caption, str):
+                captions.append(caption)
+            elif isinstance(caption, (list, np.ndarray)):
+                # take a random caption if there are multiple
+                captions.append(random.choice(caption) if is_train else caption[0])
+        prompt_batch = captions
+        prompt_embeds, pooled_prompt_embeds, text_ids = flux_controlnet_pipeline.encode_prompt(
+            prompt_batch, prompt_2=prompt_batch
+        )
+        prompt_embeds = prompt_embeds.to(dtype=weight_dtype)
+        pooled_prompt_embeds = pooled_prompt_embeds.to(dtype=weight_dtype)
+        text_ids = text_ids.to(dtype=weight_dtype)
+
+        # expand text_ids from [512, 3] to [bs, 512, 3]
+        text_ids = text_ids.unsqueeze(0).expand(prompt_embeds.shape[0], -1, -1)
+        # unet_added_cond_kwargs = {"pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids}
+        return {"prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids}
+
+    train_dataset = get_train_dataset(args, accelerator)
+    text_encoders = [text_encoder_one, text_encoder_two]
+    tokenizers = [tokenizer_one, tokenizer_two]
+    compute_embeddings_fn = functools.partial(
+        compute_embeddings,
+        flux_controlnet_pipeline=flux_controlnet_pipeline,
+        proportion_empty_prompts=args.proportion_empty_prompts,
+        weight_dtype=weight_dtype,
+    )
+    with accelerator.main_process_first():
+        from datasets.fingerprint import Hasher
+
+        # fingerprint used by the cache for the other processes to load the result
+        # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401
+        new_fingerprint = Hasher.hash(args)
+        train_dataset = train_dataset.map(
+            compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=100
+        )
+
+    del text_encoders, tokenizers
+    gc.collect()
+    torch.cuda.empty_cache()
+
+    # Then get the training dataset ready to be passed to the dataloader.
+    train_dataset = prepare_train_dataset(train_dataset, accelerator)
+
+    train_dataloader = torch.utils.data.DataLoader(
+        train_dataset,
+        shuffle=True,
+        collate_fn=collate_fn,
+        batch_size=args.train_batch_size,
+        num_workers=args.dataloader_num_workers,
+    )
+
+    # Scheduler and math around the number of training steps.
+    # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation.
+    num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes
+    if args.max_train_steps is None:
+        len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes)
+        num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps)
+        num_training_steps_for_scheduler = (
+            args.num_train_epochs * num_update_steps_per_epoch * accelerator.num_processes
+        )
+    else:
+        num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes
+
+    lr_scheduler = get_scheduler(
+        args.lr_scheduler,
+        optimizer=optimizer,
+        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
+        num_training_steps=args.max_train_steps * accelerator.num_processes,
+        num_cycles=args.lr_num_cycles,
+        power=args.lr_power,
+    )
+
+    # Prepare everything with our `accelerator`.
+    flux_controlnet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
+        flux_controlnet, optimizer, train_dataloader, lr_scheduler
+    )
+
+    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
+    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+    if args.max_train_steps is None:
+        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
+        if num_training_steps_for_scheduler != args.max_train_steps * accelerator.num_processes:
+            logger.warning(
+                f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match "
+                f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. "
+                f"This inconsistency may result in the learning rate scheduler not functioning properly."
+            )
+    # Afterwards we recalculate our number of training epochs
+    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
+
+    # We need to initialize the trackers we use, and also store our configuration.
+    # The trackers initialize automatically on the main process.
+    if accelerator.is_main_process:
+        tracker_config = dict(vars(args))
+
+        # tensorboard cannot handle list types for config
+        tracker_config.pop("validation_prompt")
+        tracker_config.pop("validation_image")
+
+        accelerator.init_trackers(args.tracker_project_name, config=tracker_config)
+
+    # Train!
+    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+
+    logger.info("***** Running training *****")
+    logger.info(f"  Num examples = {len(train_dataset)}")
+    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
+    logger.info(f"  Num Epochs = {args.num_train_epochs}")
+    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
+    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
+    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
+    logger.info(f"  Total optimization steps = {args.max_train_steps}")
+    global_step = 0
+    first_epoch = 0
+
+    # Potentially load in the weights and states from a previous save
+    if args.resume_from_checkpoint:
+        if args.resume_from_checkpoint != "latest":
+            path = os.path.basename(args.resume_from_checkpoint)
+        else:
+            # Get the most recent checkpoint
+            dirs = os.listdir(args.output_dir)
+            dirs = [d for d in dirs if d.startswith("checkpoint")]
+            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
+            path = dirs[-1] if len(dirs) > 0 else None
+
+        if path is None:
+            accelerator.print(
+                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
+            )
+            args.resume_from_checkpoint = None
+            initial_global_step = 0
+        else:
+            accelerator.print(f"Resuming from checkpoint {path}")
+            accelerator.load_state(os.path.join(args.output_dir, path))
+            global_step = int(path.split("-")[1])
+
+            initial_global_step = global_step
+            first_epoch = global_step // num_update_steps_per_epoch
+    else:
+        initial_global_step = 0
+
+    progress_bar = tqdm(
+        range(0, args.max_train_steps),
+        initial=initial_global_step,
+        desc="Steps",
+        # Only show the progress bar once on each machine.
+        disable=not accelerator.is_local_main_process,
+    )
+
+    # copied from pipeline_flux_controlnet
+    def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
+        latent_image_ids = torch.zeros(height // 2, width // 2, 3)
+        latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None]
+        latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :]
+
+        latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
+
+        latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1)
+        latent_image_ids = latent_image_ids.reshape(
+            batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels
+        )
+
+        return latent_image_ids.to(device=device, dtype=dtype)
+
+    def _pack_latents(latents, batch_size, num_channels_latents, height, width):
+        latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
+        latents = latents.permute(0, 2, 4, 1, 3, 5)
+        latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
+
+        return latents
+
+    image_logs = None
+    for epoch in range(first_epoch, args.num_train_epochs):
+        for step, batch in enumerate(train_dataloader):
+            with accelerator.accumulate(flux_controlnet):
+                # Convert images to latent space
+                # vae encode
+                pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
+                pixel_latents_tmp = vae.encode(pixel_values).latent_dist.sample()
+                pixel_latents_tmp = (pixel_latents_tmp - vae.config.shift_factor) * vae.config.scaling_factor
+                pixel_latents = _pack_latents(
+                    pixel_latents_tmp,
+                    pixel_values.shape[0],
+                    pixel_latents_tmp.shape[1],
+                    pixel_latents_tmp.shape[2],
+                    pixel_latents_tmp.shape[3],
+                )
+
+                control_values = batch["conditioning_pixel_values"].to(dtype=weight_dtype)
+                control_latents = vae.encode(control_values).latent_dist.sample()
+                control_latents = (control_latents - vae.config.shift_factor) * vae.config.scaling_factor
+                control_image = _pack_latents(
+                    control_latents,
+                    control_values.shape[0],
+                    control_latents.shape[1],
+                    control_latents.shape[2],
+                    control_latents.shape[3],
+                )
+
+                latent_image_ids = _prepare_latent_image_ids(
+                    batch_size=pixel_latents_tmp.shape[0],
+                    height=pixel_latents_tmp.shape[2],
+                    width=pixel_latents_tmp.shape[3],
+                    device=pixel_values.device,
+                    dtype=pixel_values.dtype,
+                )
+
+                # Sample noise that we'll add to the latents
+                noise = torch.randn_like(pixel_latents).to(accelerator.device).to(dtype=weight_dtype)
+                bsz = pixel_latents.shape[0]
+
+                # Sample a random timestep for each image
+                t = torch.sigmoid(torch.randn((bsz,), device=accelerator.device, dtype=weight_dtype))
+
+                # apply flow matching
+                noisy_latents = (
+                    1 - t.unsqueeze(1).unsqueeze(2).repeat(1, pixel_latents.shape[1], pixel_latents.shape[2])
+                ) * pixel_latents + t.unsqueeze(1).unsqueeze(2).repeat(
+                    1, pixel_latents.shape[1], pixel_latents.shape[2]
+                ) * noise
+
+                guidance_vec = torch.full(
+                    (noisy_latents.shape[0],), 3.5, device=noisy_latents.device, dtype=weight_dtype
+                )
+
+                controlnet_block_samples, controlnet_single_block_samples = flux_controlnet(
+                    hidden_states=noisy_latents,
+                    controlnet_cond=control_image,
+                    timestep=t,
+                    guidance=guidance_vec,
+                    pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype),
+                    encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype),
+                    txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype),
+                    img_ids=latent_image_ids[0],
+                    return_dict=False,
+                )
+
+                noise_pred = flux_transformer(
+                    hidden_states=noisy_latents,
+                    timestep=t,
+                    guidance=guidance_vec,
+                    pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype),
+                    encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype),
+                    controlnet_block_samples=[sample.to(dtype=weight_dtype) for sample in controlnet_block_samples]
+                    if controlnet_block_samples is not None
+                    else None,
+                    controlnet_single_block_samples=[
+                        sample.to(dtype=weight_dtype) for sample in controlnet_single_block_samples
+                    ]
+                    if controlnet_single_block_samples is not None
+                    else None,
+                    txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype),
+                    img_ids=latent_image_ids[0],
+                    return_dict=False,
+                )[0]
+
+                loss = F.mse_loss(noise_pred.float(), (noise - pixel_latents).float(), reduction="mean")
+                accelerator.backward(loss)
+                # Check whether the gradient of any model parameter contains NaN
+                for name, param in flux_controlnet.named_parameters():
+                    if param.grad is not None and torch.isnan(param.grad).any():
+                        logger.error(f"Gradient for {name} contains NaN!")
+
+                if accelerator.sync_gradients:
+                    params_to_clip = flux_controlnet.parameters()
+                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
+                optimizer.step()
+                lr_scheduler.step()
+                optimizer.zero_grad(set_to_none=args.set_grads_to_none)
+
+            # Checks if the accelerator has performed an optimization step behind the scenes
+            if accelerator.sync_gradients:
+                progress_bar.update(1)
+                global_step += 1
+
+                # DeepSpeed requires saving weights on every device; saving weights only on the main process would cause issues.
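+                # (Under DeepSpeed, optimizer and model state are sharded across ranks, so
+                # `accelerator.save_state` has to run on every process, not just the main one.)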
+                if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process:
+                    if global_step % args.checkpointing_steps == 0:
+                        # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
+                        if args.checkpoints_total_limit is not None:
+                            checkpoints = os.listdir(args.output_dir)
+                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
+                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
+
+                            # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
+                            if len(checkpoints) >= args.checkpoints_total_limit:
+                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
+                                removing_checkpoints = checkpoints[0:num_to_remove]
+
+                                logger.info(
+                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
+                                )
+                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
+
+                                for removing_checkpoint in removing_checkpoints:
+                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
+                                    shutil.rmtree(removing_checkpoint)
+
+                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
+                        accelerator.save_state(save_path)
+                        logger.info(f"Saved state to {save_path}")
+
+                if args.validation_prompt is not None and global_step % args.validation_steps == 0:
+                    image_logs = log_validation(
+                        vae=vae,
+                        flux_transformer=flux_transformer,
+                        flux_controlnet=flux_controlnet,
+                        args=args,
+                        accelerator=accelerator,
+                        weight_dtype=weight_dtype,
+                        step=global_step,
+                    )
+
+            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
+            progress_bar.set_postfix(**logs)
+            accelerator.log(logs, step=global_step)
+
+            if global_step >= args.max_train_steps:
+                break
+
+    # Create the pipeline using the trained modules and save it.
+    accelerator.wait_for_everyone()
+    if accelerator.is_main_process:
+        flux_controlnet = unwrap_model(flux_controlnet)
+        flux_controlnet.save_pretrained(args.output_dir)
+
+        # Run a final round of validation.
+        # Setting `vae`, `unet`, and `controlnet` to None to load automatically from `args.output_dir`.
+        image_logs = None
+        if args.validation_prompt is not None:
+            image_logs = log_validation(
+                vae=vae,
+                flux_transformer=flux_transformer,
+                flux_controlnet=None,
+                args=args,
+                accelerator=accelerator,
+                weight_dtype=weight_dtype,
+                step=global_step,
+                is_final_validation=True,
+            )
+
+    accelerator.end_training()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    main(args)

From 4a535737f01b2546d1b4fdd956d47e2fb76fb371 Mon Sep 17 00:00:00 2001
From: PromeAIpro
Date: Fri, 30 Aug 2024 10:42:28 +0800
Subject: [PATCH 02/40] fix error

---
 examples/controlnet/README_flux.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index a7c82eef583e..bcc4a3b26e11 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -100,7 +100,7 @@ accelerate launch train_controlnet_flux.py \
 
 To better track our training experiments, we're using the following flags in the command above:
 
-* `report_to="wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
+* `report_to="tensorboard"` will ensure the training runs are tracked on TensorBoard.
 * `validation_image`, `validation_prompt`, and `validation_steps` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
 
 Our experiments were conducted on a single 40GB A100 GPU.

From 14e9970e70614703160ec57f2a00372ff295f63f Mon Sep 17 00:00:00 2001
From: PromeAIpro
Date: Sun, 1 Sep 2024 19:51:08 +0800
Subject: [PATCH 03/40] fix subfolder error

---
 examples/controlnet/train_controlnet_flux.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index bc430dd13223..e3419593c386 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -862,7 +862,7 @@ def load_model_hook(models, input_dir):
         model = models.pop()
 
         # load diffusers style into model
-        load_model = FluxControlNetModel.from_pretrained(input_dir, subfolder="controlnet")
+        load_model = FluxControlNetModel.from_pretrained(input_dir, subfolder="flux_controlnet")
         model.register_to_config(**load_model.config)
 
         model.load_state_dict(load_model.state_dict())
         del load_model

From 973c6fb1fd232d5cf09db3591f082801a0b0a4de Mon Sep 17 00:00:00 2001
From: PromeAIpro
Date: Wed, 4 Sep 2024 03:45:57 +0000
Subject: [PATCH 04/40] fix preprocess error

---
 examples/controlnet/train_controlnet_flux.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index e3419593c386..222d2c2038ec 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -656,6 +656,7 @@ def prepare_train_dataset(dataset, accelerator):
             transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
             transforms.CenterCrop(args.resolution),
             transforms.ToTensor(),
+            transforms.Normalize([0.5], [0.5]),
         ]
     )
 
@@ -1028,7 +1029,6 @@
 
     # Scheduler and math around the number of training steps.
     # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation.
-    num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes
     if args.max_train_steps is None:
         len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes)
         num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps)

From 57d143bbc39da6c7e2819afe5ed3fbf2da008df8 Mon Sep 17 00:00:00 2001
From: PromeAI
Date: Fri, 13 Sep 2024 17:22:38 +0800
Subject: [PATCH 05/40] Update examples/controlnet/README_flux.md

Co-authored-by: Sayak Paul
---
 examples/controlnet/README_flux.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index bcc4a3b26e11..e4a76f726d35 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -2,7 +2,7 @@
 
 The `train_controlnet_flux.py` script shows how to implement the ControlNet training procedure and adapt it for [FLUX](https://github.com/black-forest-labs/flux).
 
-Training script provided by LibAI, which is an institution dedicated to the progress and achievement of artificial general intelligence.LibAI is the developer of [cutout.pro](https://www.cutout.pro/) and [promeai.pro](https://www.promeai.pro/).
+Training script provided by LibAI, which is an institution dedicated to the progress and achievement of artificial general intelligence. LibAI is the developer of [cutout.pro](https://www.cutout.pro/) and [promeai.pro](https://www.promeai.pro/).

From af1b7a5011bbe98c5909891a46bed888cfd302b3 Mon Sep 17 00:00:00 2001
From: PromeAI
Date: Fri, 13 Sep 2024 17:23:36 +0800
Subject: [PATCH 06/40] Update examples/controlnet/README_flux.md

Co-authored-by: Sayak Paul
---
 examples/controlnet/README_flux.md | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index e4a76f726d35..f2295c639363 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -118,10 +118,13 @@ from diffusers.models.controlnet_flux import FluxControlNetModel
 
 base_model = 'black-forest-labs/FLUX.1-dev'
 controlnet_model = 'path to controlnet'
 controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
-pipe = FluxControlNetPipeline.from_pretrained(base_model,
-                                              controlnet=controlnet,
-                                              torch_dtype=torch.bfloat16)
-pipe.to("cuda")
+pipe = FluxControlNetPipeline.from_pretrained(
+    base_model,
+    controlnet=controlnet,
+    torch_dtype=torch.bfloat16
+)
+# enable memory optimizations
+pipe.enable_model_cpu_offload()
 
 control_image = load_image("./conditioning_image_1.png").resize((1024, 1024))
 prompt = "pale golden rod circle with old lace background"

From d19b101c2f49027dd2ce640329ea5f222dd3b78f Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 13 Sep 2024 05:38:39 -0400
Subject: [PATCH 07/40] fix readme

---
 examples/controlnet/README_flux.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index f2295c639363..2a54efbc1f97 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -103,7 +103,7 @@ To better track our training experiments, we're using the following flags in the
 * `report_to="tensorboard"` will ensure the training runs are tracked on TensorBoard.
 * `validation_image`, `validation_prompt`, and `validation_steps` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
 
-Our experiments were conducted on a single 40GB A100 GPU.
+Our experiments were conducted on a single 80GB A100 GPU.

From 64251ac5333365dafb5b8b5adb4b59154f52bc71 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 13 Sep 2024 22:32:56 -0400
Subject: [PATCH 08/40] fix note error

---
 examples/controlnet/README_flux.md           | 40 ------------------
 examples/controlnet/train_controlnet_flux.py |  1 -
 .../flux/pipeline_flux_controlnet.py         |  4 +-
 3 files changed, 2 insertions(+), 43 deletions(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index 2a54efbc1f97..737161da7dc2 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -137,44 +137,4 @@ image = pipe(
     guidance_scale=3.5,
 ).images[0]
 image.save("./output.png")
-```
-
-## Notes
-
-### T5 does not support bf16 autocast (cause unknown) and will produce black images.
- -```diff -if is_final_validation or torch.backends.mps.is_available(): - autocast_ctx = nullcontext() -else: - # t5 seems not support autocast and i don't know why -+ autocast_ctx = nullcontext() -- autocast_ctx = torch.autocast(accelerator.device.type) -``` - -### TO Fix Error - -```bash -RuntimeError: mat1 and mat2 must have the same dtype, but got Float and BFloat16 -``` - -#### we need to change some code in `diffusers/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py` to ensure the dtype - -```diff -noise_pred = self.transformer( - hidden_states=latents, - # YiYi notes: divide it by 1000 for now because we scale it by 1000 in the transforme rmodel (we should not keep it but I want to keep the inputs same for the model for testing) - timestep=timestep / 1000, - guidance=guidance, - pooled_projections=pooled_prompt_embeds, - encoder_hidden_states=prompt_embeds, -- controlnet_block_samples=controlnet_block_samples, -- controlnet_single_block_samples=controlnet_single_block_samples, -+ controlnet_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_block_samples]if controlnet_block_samples is not None else None, -+ controlnet_single_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples] if controlnet_single_block_samples is not None else None, - txt_ids=text_ids, - img_ids=latent_image_ids, - joint_attention_kwargs=self.joint_attention_kwargs, - return_dict=False, -)[0] ``` \ No newline at end of file diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py index 222d2c2038ec..d7eb8f8ff188 100644 --- a/examples/controlnet/train_controlnet_flux.py +++ b/examples/controlnet/train_controlnet_flux.py @@ -125,7 +125,6 @@ def log_validation( else: # t5 seems not support autocast and i don't know why autocast_ctx = nullcontext() - # autocast_ctx = torch.autocast(accelerator.device.type) for validation_prompt, validation_image in zip(validation_prompts, validation_images): diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py index 481994903d3f..c62be26a95b4 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py @@ -869,8 +869,8 @@ def __call__( guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, - controlnet_block_samples=controlnet_block_samples, - controlnet_single_block_samples=controlnet_single_block_samples, + controlnet_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_block_samples]if controlnet_block_samples is not None else None, + controlnet_single_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples] if controlnet_single_block_samples is not None else None, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, From c98d43f8031d33441767bf2261f6fca49047b043 Mon Sep 17 00:00:00 2001 From: Your Name Date: Fri, 13 Sep 2024 23:56:43 -0400 Subject: [PATCH 09/40] add some Tutorial for deepspeed --- examples/controlnet/README_flux.md | 282 +++++++++++++++++++++++++++++ 1 file changed, 282 insertions(+) diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md index 737161da7dc2..81562b82d229 100644 --- a/examples/controlnet/README_flux.md +++ b/examples/controlnet/README_flux.md @@ -137,4 +137,286 @@ image = pipe( guidance_scale=3.5, ).images[0] 
image.save("./output.png") +``` + +## Apply Deepspeed Zero3 + +This is an experimental process, I am not sure if it is suitable for everyone, we used this process to successfully train 512 resolution on A100(40g) * 8. +Please modify some of the code in the script. +### 1.Customize zero3 settings + +Copy the accelerate_config_zero3.yaml,modify `num_processes` according to the number of gpu you want to use: + +```bash +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + gradient_accumulation_steps: 8 + offload_optimizer_device: cpu + offload_param_device: cpu + zero3_init_flag: true + zero3_save_16bit_model: true + zero_stage: 3 +distributed_type: DEEPSPEED +downcast_bf16: 'no' +enable_cpu_affinity: false +machine_rank: 0 +main_training_function: main +mixed_precision: bf16 +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false +``` + +### 2.Precompute all inputs (latent, embeddings) + +In the **train_controlnet_flux.py**, We need to pre-calculate all parameters and put them into batches.So we first need to rewrite the compute_embeddings function. + +```python +def compute_embeddings(batch, proportion_empty_prompts, vae, flux_controlnet_pipeline, weight_dtype, is_train=True): + + ### compute text embeddings + prompt_batch = batch[args.caption_column] + captions = [] + for caption in prompt_batch: + if random.random() < proportion_empty_prompts: + captions.append("") + elif isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + prompt_batch = captions + prompt_embeds, pooled_prompt_embeds, text_ids = flux_controlnet_pipeline.encode_prompt( + prompt_batch, prompt_2=prompt_batch + ) + prompt_embeds = prompt_embeds.to(dtype=weight_dtype) + pooled_prompt_embeds = pooled_prompt_embeds.to(dtype=weight_dtype) + text_ids = text_ids.to(dtype=weight_dtype) + + # text_ids [512,3] to [bs,512,3] + text_ids = text_ids.unsqueeze(0).expand(prompt_embeds.shape[0], -1, -1) + + ### compute latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + return latents + + # vae encode + pixel_values = batch["pixel_values"] + pixel_values = torch.stack([image for image in pixel_values]).to(dtype=weight_dtype).to(vae.device) + pixel_latents_tmp = vae.encode(pixel_values).latent_dist.sample() + pixel_latents_tmp = (pixel_latents_tmp - vae.config.shift_factor) * vae.config.scaling_factor + pixel_latents = _pack_latents( + pixel_latents_tmp, + pixel_values.shape[0], + pixel_latents_tmp.shape[1], + pixel_latents_tmp.shape[2], + pixel_latents_tmp.shape[3], + ) + + control_values = batch["conditioning_pixel_values"] + control_values = torch.stack([image for image in control_values]).to(dtype=weight_dtype).to(vae.device) + control_latents = vae.encode(control_values).latent_dist.sample() + control_latents = (control_latents - vae.config.shift_factor) * vae.config.scaling_factor + control_latents = _pack_latents( + control_latents, + control_values.shape[0], + control_latents.shape[1], + control_latents.shape[2], + control_latents.shape[3], + ) + + # copied from pipeline_flux_controlnet + def 
_prepare_latent_image_ids(batch_size, height, width, device, dtype): + latent_image_ids = torch.zeros(height // 2, width // 2, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1) + latent_image_ids = latent_image_ids.reshape( + batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + latent_image_ids = _prepare_latent_image_ids( + batch_size=pixel_latents_tmp.shape[0], + height=pixel_latents_tmp.shape[2], + width=pixel_latents_tmp.shape[3], + device=pixel_values.device, + dtype=pixel_values.dtype, + ) + + # unet_added_cond_kwargs = {"pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids} + return {"prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids, "pixel_latents": pixel_latents, "control_latents": control_latents, "latent_image_ids": latent_image_ids} +``` + +Because we need images to pass through vae, we need to preprocess the images in the dataset first. At the same time, vae requires more video memory, so you may need to modify the batch_size below +```diff ++train_dataset = prepare_train_dataset(train_dataset, accelerator) +with accelerator.main_process_first(): + from datasets.fingerprint import Hasher + + # fingerprint used by the cache for the other processes to load the result + # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401 + new_fingerprint = Hasher.hash(args) + train_dataset = train_dataset.map( +- compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=100 ++ compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=10 + ) + +del text_encoders, tokenizers +gc.collect() +torch.cuda.empty_cache() + +# Then get the training dataset ready to be passed to the dataloader. +-train_dataset = prepare_train_dataset(train_dataset, accelerator) +``` +### 3.Redefine the behavior of getting batchsize + +Now that we have all the preprocessing done, we need to modify the collate_fn function. 
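Before doing so, it can be worth spot-checking one precomputed example to confirm the cache holds what `collate_fn` will expect (a minimal sketch; the column names assume the `compute_embeddings` rewrite above has already been mapped over the dataset):

```python
import numpy as np

# `train_dataset` is the dataset returned by the cached `.map(compute_embeddings_fn, ...)` call
sample = train_dataset[0]
for key in ["pixel_latents", "control_latents", "latent_image_ids", "prompt_embeds", "pooled_prompt_embeds", "text_ids"]:
    arr = np.asarray(sample[key])  # datasets stores these as plain nested lists
    print(f"{key}: shape={arr.shape} dtype={arr.dtype}")
```

The rewritten `collate_fn` below then simply re-tensorizes and stacks these cached fields.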
+ +```python +def collate_fn(examples): + pixel_values = torch.stack([example["pixel_values"] for example in examples]) + pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() + + conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples]) + conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float() + + pixel_latents = torch.stack([torch.tensor(example["pixel_latents"]) for example in examples]) + pixel_latents = pixel_latents.to(memory_format=torch.contiguous_format).float() + + control_latents = torch.stack([torch.tensor(example["control_latents"]) for example in examples]) + control_latents = control_latents.to(memory_format=torch.contiguous_format).float() + + latent_image_ids= torch.stack([torch.tensor(example["latent_image_ids"]) for example in examples]) + + prompt_ids = torch.stack([torch.tensor(example["prompt_embeds"]) for example in examples]) + + pooled_prompt_embeds = torch.stack([torch.tensor(example["pooled_prompt_embeds"]) for example in examples]) + text_ids = torch.stack([torch.tensor(example["text_ids"]) for example in examples]) + + return { + "pixel_values": pixel_values, + "conditioning_pixel_values": conditioning_pixel_values, + "pixel_latents": pixel_latents, + "control_latents": control_latents, + "latent_image_ids": latent_image_ids, + "prompt_ids": prompt_ids, + "unet_added_conditions": {"pooled_prompt_embeds": pooled_prompt_embeds, "time_ids": text_ids}, + } +``` +Finally, we just need to modify the way of obtaining various parameters during training. +```python +for epoch in range(first_epoch, args.num_train_epochs): + for step, batch in enumerate(train_dataloader): + with accelerator.accumulate(flux_controlnet): + # Convert images to latent space + pixel_latents = batch["pixel_latents"].to(dtype=weight_dtype) + control_image = batch["control_latents"].to(dtype=weight_dtype) + latent_image_ids = batch["latent_image_ids"].to(dtype=weight_dtype) + + # Sample noise that we'll add to the latents + noise = torch.randn_like(pixel_latents).to(accelerator.device).to(dtype=weight_dtype) + bsz = pixel_latents.shape[0] + + # Sample a random timestep for each image + t = torch.sigmoid(torch.randn((bsz,), device=accelerator.device, dtype=weight_dtype)) + + # apply flow matching + noisy_latents = ( + 1 - t.unsqueeze(1).unsqueeze(2).repeat(1, pixel_latents.shape[1], pixel_latents.shape[2]) + ) * pixel_latents + t.unsqueeze(1).unsqueeze(2).repeat( + 1, pixel_latents.shape[1], pixel_latents.shape[2] + ) * noise + + guidance_vec = torch.full( + (noisy_latents.shape[0],), 3.5, device=noisy_latents.device, dtype=weight_dtype + ) + + controlnet_block_samples, controlnet_single_block_samples = flux_controlnet( + hidden_states=noisy_latents, + controlnet_cond=control_image, + timestep=t, + guidance=guidance_vec, + pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype), + encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype), + txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype), + img_ids=latent_image_ids[0], + return_dict=False, + ) + + noise_pred = flux_transformer( + hidden_states=noisy_latents, + timestep=t, + guidance=guidance_vec, + pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype), + encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype), + controlnet_block_samples=[sample.to(dtype=weight_dtype) for sample in 
controlnet_block_samples]
+                    if controlnet_block_samples is not None
+                    else None,
+                    controlnet_single_block_samples=[
+                        sample.to(dtype=weight_dtype) for sample in controlnet_single_block_samples
+                    ]
+                    if controlnet_single_block_samples is not None
+                    else None,
+                    txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype),
+                    img_ids=latent_image_ids[0],
+                    return_dict=False,
+                )[0]
+```
+Congratulations! You have completed all the code modifications required for DeepSpeed ZeRO-3.
+
+### 4.Training with deepspeedzero3
+
+Start!!!
+
+```bash
+export pretrained_model_name_or_path='flux-dev-model-path'
+export MODEL_TYPE='train_model_type'
+export TRAIN_JSON_FILE="your_json_file"
+export CONTROL_TYPE='control_preprocessor_type'
+export CAPTION_COLUMN='caption_column'
+
+export CACHE_DIR="/data/train_csr/.cache/huggingface/"
+export OUTPUT_DIR='/data/train_csr/FLUX/MODEL_OUT/'$MODEL_TYPE
+# The first step is to use Python to precompute all caches. For this step, use the line below in place of the accelerate launch command. (I am not sure why using accelerate here would cause problems.)
+
+CUDA_VISIBLE_DEVICES=0 python3 train_controlnet_flux.py \
+
+# The second step is to use the above accelerate config to train
+accelerate launch --config_file "./accelerate_config_zero3.yaml" train_controlnet_flux.py \
+  --pretrained_model_name_or_path=$pretrained_model_name_or_path \
+  --jsonl_for_train=$TRAIN_JSON_FILE \
+  --conditioning_image_column=$CONTROL_TYPE \
+  --image_column=image \
+  --caption_column=$CAPTION_COLUMN \
+  --cache_dir=$CACHE_DIR \
+  --tracker_project_name=$MODEL_TYPE \
+  --output_dir=$OUTPUT_DIR \
+  --max_train_steps=500000 \
+  --mixed_precision bf16 \
+  --checkpointing_steps=1000 \
+  --gradient_accumulation_steps=8 \
+  --resolution=512 \
+  --train_batch_size=1 \
+  --learning_rate=1e-5 \
+  --num_double_layers=4 \
+  --num_single_layers=0 \
+  --gradient_checkpointing \
+  --resume_from_checkpoint="latest" \
+  # --use_adafactor \ don't use
+  # --validation_steps=3 \ not supported
+  # --validation_image $VALIDATION_IMAGE \ not supported
+  # --validation_prompt "xxx" \ not supported
+```
\ No newline at end of file

From 569e0de85cfae072657fa47781f725fcb4155047 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sat, 14 Sep 2024 00:04:08 -0400
Subject: [PATCH 10/40] fix some Format Error

---
 examples/controlnet/README_flux.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index 81562b82d229..bd38c4cf67ae 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -145,7 +145,7 @@ This is an experimental process; I am not sure it will suit everyone. We used
Please modify some of the code in the script.
### 1.Customize zero3 settings

-Copy the accelerate_config_zero3.yaml,modify `num_processes` according to the number of gpu you want to use:
+Copy the **accelerate_config_zero3.yaml**, modify `num_processes` according to the number of gpus you want to use:
@@ -175,7 +175,7 @@ use_cpu: false

### 2.Precompute all inputs (latent, embeddings)

-In the **train_controlnet_flux.py**, We need to pre-calculate all parameters and put them into batches.So we first need to rewrite the compute_embeddings function.
+In `train_controlnet_flux.py`, we need to pre-calculate all parameters and put them into batches, so we first need to rewrite the `compute_embeddings` function.
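The core trick inside that rewrite is FLUX's latent packing, which folds every 2x2 patch of VAE latents into a single token. A standalone shape check (a sketch, independent of the training script; the 16-channel 64x64 latent corresponds to a 512px image through the FLUX VAE):

```python
import torch

def pack_latents(latents: torch.Tensor) -> torch.Tensor:
    # (B, C, H, W) -> (B, (H/2) * (W/2), C * 4): each 2x2 latent patch becomes one token
    b, c, h, w = latents.shape
    latents = latents.view(b, c, h // 2, 2, w // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5)
    return latents.reshape(b, (h // 2) * (w // 2), c * 4)

x = torch.randn(1, 16, 64, 64)
print(pack_latents(x).shape)  # torch.Size([1, 1024, 64])
```

The `_pack_latents` helper inside the rewritten `compute_embeddings`, shown in context below, follows exactly this scheme.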
```python def compute_embeddings(batch, proportion_empty_prompts, vae, flux_controlnet_pipeline, weight_dtype, is_train=True): @@ -260,7 +260,7 @@ def compute_embeddings(batch, proportion_empty_prompts, vae, flux_controlnet_pip return {"prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids, "pixel_latents": pixel_latents, "control_latents": control_latents, "latent_image_ids": latent_image_ids} ``` -Because we need images to pass through vae, we need to preprocess the images in the dataset first. At the same time, vae requires more video memory, so you may need to modify the batch_size below +Because we need images to pass through vae, we need to preprocess the images in the dataset first. At the same time, vae requires more gpu memory, so you may need to modify the `batch_size` below ```diff +train_dataset = prepare_train_dataset(train_dataset, accelerator) with accelerator.main_process_first(): @@ -283,7 +283,7 @@ torch.cuda.empty_cache() ``` ### 3.Redefine the behavior of getting batchsize -Now that we have all the preprocessing done, we need to modify the collate_fn function. +Now that we have all the preprocessing done, we need to modify the `collate_fn` function. ```python def collate_fn(examples): From 67deb7a696a850988f34e06aede7e0d9cc891d67 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 14 Sep 2024 23:59:12 -0400 Subject: [PATCH 11/40] add dataset_path example --- examples/controlnet/README_flux.md | 22 +++++++++++++------- examples/controlnet/train_controlnet_flux.py | 15 ++++++++++--- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md index bd38c4cf67ae..cb8b338f538b 100644 --- a/examples/controlnet/README_flux.md +++ b/examples/controlnet/README_flux.md @@ -48,13 +48,21 @@ When running `accelerate config`, if we specify torch compile mode to True there ## Custom Datasets -We support importing data from jsonl(xxx.jsonl),here is a brief example: +We support dataset formats: +The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script, To use our example, add `--dataset_name=fusing/fill50k \` to the script and remove line `--jsonl_for_train` mentioned below. + + +We also support importing data from jsonl(xxx.jsonl),using `--jsonl_for_train` to enable it, here is a brief example of jsonl files: ```sh -{"image_path": "xxx", "caption": "xxx", "control_path": "xxx"} -{"image_path": "xxx", "caption": "xxx", "control_path": "xxx"} +{"image": "xxx", "text": "xxx", "conditioning_image": "xxx"} +{"image": "xxx", "text": "xxx", "conditioning_image": "xxx"} ``` + + + + ## Training Our training examples use two test conditioning images. 
They can be downloaded by running @@ -77,11 +85,11 @@ export TRAIN_JSON_FILE="path to your jsonl file" accelerate launch train_controlnet_flux.py \ --pretrained_model_name_or_path=$MODEL_DIR \ - --conditioning_image_column=control_path \ - --image_column=image_path \ - --caption_column=caption \ + --dataset_name=fusing/fill50k \ + --conditioning_image_column=conditioning_image \ + --image_column=image \ + --caption_column=text \ --output_dir=$OUTPUT_DIR \ - --jsonl_for_train=$TRAIN_JSON_FILE \ --mixed_precision="bf16" \ --resolution=512 \ --learning_rate=1e-5 \ diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py index d7eb8f8ff188..4ce9e841fdbb 100644 --- a/examples/controlnet/train_controlnet_flux.py +++ b/examples/controlnet/train_controlnet_flux.py @@ -595,9 +595,18 @@ def parse_args(input_args=None): def get_train_dataset(args, accelerator): - # load from json - dataset = load_dataset("json", data_files=args.jsonl_for_train, cache_dir=args.cache_dir) - dataset = dataset.flatten_indices() + dataset = None + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, + args.dataset_config_name, + cache_dir=args.cache_dir, + ) + if args.jsonl_for_train is not None: + # load from json + dataset = load_dataset("json", data_files=args.jsonl_for_train, cache_dir=args.cache_dir) + dataset = dataset.flatten_indices() # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names From 32fbeac2bfd7a5434b6c05b581dff608730b56e3 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 15 Sep 2024 00:30:08 -0400 Subject: [PATCH 12/40] remove print, add guidance_scale CLI, readable apply --- examples/controlnet/train_controlnet_flux.py | 11 ++++++++--- .../pipelines/flux/pipeline_flux_controlnet.py | 7 +++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py index 4ce9e841fdbb..6cecbe8efbce 100644 --- a/examples/controlnet/train_controlnet_flux.py +++ b/examples/controlnet/train_controlnet_flux.py @@ -554,6 +554,13 @@ def parse_args(input_args=None): help="Path to the jsonl file containing the training data.", ) + parser.add_argument( + "--guidance_scale", + type=float, + default=3.5, + help="the guidance scale used for transformer.", + ) + if input_args is not None: args = parser.parse_args(input_args) else: @@ -669,7 +676,6 @@ def prepare_train_dataset(dataset, accelerator): ) def preprocess_train(examples): - # print(f"{examples[args.image_column]=}") images = [ (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB")) for image in examples[args.image_column] @@ -997,7 +1003,6 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline # text_ids [512,3] to [bs,512,3] text_ids = text_ids.unsqueeze(0).expand(prompt_embeds.shape[0], -1, -1) - print(text_ids.shape) # unet_added_cond_kwargs = {"pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids} return {"prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids} @@ -1206,7 +1211,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width): ) * noise guidance_vec = torch.full( - (noisy_latents.shape[0],), 3.5, device=noisy_latents.device, dtype=weight_dtype + (noisy_latents.shape[0],), args.guidance_scale, device=noisy_latents.device, 
dtype=weight_dtype ) controlnet_block_samples, controlnet_single_block_samples = flux_controlnet( diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py index c62be26a95b4..2465f6963d56 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py +++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py @@ -862,6 +862,9 @@ def __call__( joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False, ) + # ensure dtype + processed_controlnet_block_samples = [sample.to(dtype=latents.dtype) for sample in controlnet_block_samples] if controlnet_block_samples is not None else None + processed_controlnet_single_block_samples = [sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples] if controlnet_single_block_samples is not None else None noise_pred = self.transformer( hidden_states=latents, @@ -869,8 +872,8 @@ def __call__( guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, - controlnet_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_block_samples]if controlnet_block_samples is not None else None, - controlnet_single_block_samples=[sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples] if controlnet_single_block_samples is not None else None, + controlnet_block_samples=processed_controlnet_block_samples, + controlnet_single_block_samples=processed_controlnet_single_block_samples, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, From b03cb01c5b8580e6f89125d8ebbae5dda9a4c41a Mon Sep 17 00:00:00 2001 From: PromeAI Date: Sun, 15 Sep 2024 23:24:52 +0800 Subject: [PATCH 13/40] Update examples/controlnet/README_flux.md Co-authored-by: Sayak Paul --- examples/controlnet/README_flux.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md index cb8b338f538b..d18d0eaf4159 100644 --- a/examples/controlnet/README_flux.md +++ b/examples/controlnet/README_flux.md @@ -49,7 +49,7 @@ When running `accelerate config`, if we specify torch compile mode to True there ## Custom Datasets We support dataset formats: -The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script, To use our example, add `--dataset_name=fusing/fill50k \` to the script and remove line `--jsonl_for_train` mentioned below. +The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script. To use our example, add `--dataset_name=fusing/fill50k \` to the script and remove line `--jsonl_for_train` mentioned below. 
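To see what the script will actually consume, the Hub copy can be inspected directly (a quick sketch with the `datasets` library; the column names match the `fusing/fill50k` repo):

```python
from datasets import load_dataset

dataset = load_dataset("fusing/fill50k", split="train")
print(dataset.column_names)  # ['image', 'conditioning_image', 'text']
print(dataset[0]["text"])    # e.g. "pale golden rod circle with old lace background"
```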
We also support importing data from jsonl(xxx.jsonl),using `--jsonl_for_train` to enable it, here is a brief example of jsonl files:
```sh
{"image": "xxx", "text": "xxx", "conditioning_image": "xxx"}
{"image": "xxx", "text": "xxx", "conditioning_image": "xxx"}
```

From 443f251ffb18edd01d3a7a25f84c147fb8b86f35 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sun, 15 Sep 2024 23:03:11 -0400
Subject: [PATCH 14/40] update,push_to_hub,save_weight_dtype,static method,clear_objs_and_retain_memory,report_to=wandb

---
 examples/controlnet/README_flux.md           |  18 +--
 examples/controlnet/train_controlnet_flux.py | 154 ++++++++++++-------
 2 files changed, 102 insertions(+), 70 deletions(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index d18d0eaf4159..6b9eb33184da 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -58,11 +58,6 @@ We also support importing data from jsonl(xxx.jsonl),using `--jsonl_for_train` t
{"image": "xxx", "text": "xxx", "conditioning_image": "xxx"}
```

-
-
-
-
-
## Training

Our training examples use two test conditioning images. They can be downloaded by running
@@ -78,18 +73,13 @@ we can define the num_layers, num_single_layers, which determines the size of th

```bash
-export MODEL_DIR="black-forest-labs/FLUX.1-dev"
-export OUTPUT_DIR="path to save model"
-export TRAIN_JSON_FILE="path to your jsonl file"
-
-
accelerate launch train_controlnet_flux.py \
-  --pretrained_model_name_or_path=$MODEL_DIR \
+  --pretrained_model_name_or_path="black-forest-labs/FLUX.1-dev" \
  --dataset_name=fusing/fill50k \
  --conditioning_image_column=conditioning_image \
  --image_column=image \
  --caption_column=text \
-  --output_dir=$OUTPUT_DIR \
+  --output_dir="path to save model" \
  --mixed_precision="bf16" \
  --resolution=512 \
  --learning_rate=1e-5 \
@@ -100,7 +90,7 @@ accelerate launch train_controlnet_flux.py \
  --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
-  --report_to="tensorboard" \
+  --report_to="wandb" \
  --num_double_layers=4 \
  --num_single_layers=0 \
  --seed=42 \
@@ -108,7 +98,7 @@ accelerate launch train_controlnet_flux.py \

To better track our training experiments, we're using the following flags in the command above:

-* `report_to="tensorboard"` will ensure the training runs are tracked on TensorBoard.
+* `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases.
* `validation_image`, `validation_prompt`, and `validation_steps` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.

Our experiments were conducted on a single 80GB A100 GPU.
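A practical consequence of the `--save_weight_dtype` flag introduced below: when it is set to `bf16` or `fp16`, `save_pretrained` writes the weights as a dtype variant, so they have to be loaded back with the matching `variant` argument (a sketch; the path is a placeholder for your `--output_dir`):

```python
import torch
from diffusers.models.controlnet_flux import FluxControlNetModel

controlnet = FluxControlNetModel.from_pretrained(
    "path/to/output_dir",      # placeholder: the --output_dir used during training
    torch_dtype=torch.bfloat16,
    variant="bf16",            # must match --save_weight_dtype; omit for fp32 checkpoints
)
```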
diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py index 6cecbe8efbce..c2e95472b114 100644 --- a/examples/controlnet/train_controlnet_flux.py +++ b/examples/controlnet/train_controlnet_flux.py @@ -34,7 +34,7 @@ from accelerate.logging import get_logger from accelerate.utils import DistributedType, ProjectConfiguration, set_seed from datasets import load_dataset -from huggingface_hub import create_repo +from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from torchvision import transforms @@ -54,10 +54,11 @@ from diffusers.models.controlnet_flux import FluxControlNetModel from diffusers.optimization import get_scheduler from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline -from diffusers.utils import check_min_version, is_wandb_available +from diffusers.utils import check_min_version, is_wandb_available, make_image_grid +from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_torch_npu_available, is_xformers_available from diffusers.utils.torch_utils import is_compiled_module - +from diffusers.training_utils import clear_objs_and_retain_memory if is_wandb_available(): import wandb @@ -85,7 +86,7 @@ def log_validation( torch_dtype=torch.bfloat16, ) else: - flux_controlnet = FluxControlNetModel.from_pretrained(args.output_dir, torch_dtype=torch.bfloat16) + flux_controlnet = FluxControlNetModel.from_pretrained(args.output_dir, torch_dtype=torch.bfloat16, variant=args.save_weight_dtype) pipeline = FluxControlNetPipeline.from_pretrained( args.pretrained_model_name_or_path, controlnet=flux_controlnet, @@ -198,6 +199,48 @@ def log_validation( return image_logs +def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None): + img_str = "" + if image_logs is not None: + img_str = "You can find some example images below.\n\n" + for i, log in enumerate(image_logs): + images = log["images"] + validation_prompt = log["validation_prompt"] + validation_image = log["validation_image"] + validation_image.save(os.path.join(repo_folder, "image_control.png")) + img_str += f"prompt: {validation_prompt}\n" + images = [validation_image] + images + make_image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png")) + img_str += f"![images_{i})](./images_{i}.png)\n" + + model_description = f""" +# controlnet-{repo_id} + +These are controlnet weights trained on {base_model} with new type of conditioning. +{img_str} +""" + + model_card = load_or_create_model_card( + repo_id_or_path=repo_id, + from_training=True, + license="openrail++", + base_model=base_model, + model_description=model_description, + inference=True, + ) + + tags = [ + "flux", + "flux-diffusers", + "text-to-image", + "diffusers", + "controlnet", + "diffusers-training", + ] + model_card = populate_model_card(model_card, tags=tags) + + model_card.save(os.path.join(repo_folder, "README.md")) + def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.") parser.add_argument( @@ -399,7 +442,7 @@ def parse_args(input_args=None): parser.add_argument( "--report_to", type=str, - default="tensorboard", + default="wandb", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 
@@ -447,16 +490,6 @@ def parse_args(input_args=None): default=None, help="The config of the Dataset, leave as None if there's only one config.", ) - parser.add_argument( - "--train_data_dir", - type=str, - default=None, - help=( - "A folder containing the training data. Folder contents must follow the structure described in" - " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" - " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." - ), - ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image." ) @@ -541,7 +574,7 @@ def parse_args(input_args=None): parser.add_argument( "--tracker_project_name", type=str, - default="sd_xl_train_controlnet", + default="flux_train_controlnet", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" @@ -560,17 +593,27 @@ def parse_args(input_args=None): default=3.5, help="the guidance scale used for transformer.", ) + + parser.add_argument( + "--save_weight_dtype", + type=str, + default="fp32", + choices=["fp16", "bf16", "fp32",], + help=( + "Preserve precision type according to selected weight" + ), + ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() - # if args.dataset_name is None and args.train_data_dir is None: - # raise ValueError("Specify either `--dataset_name` or `--train_data_dir`") + if args.dataset_name is None and args.jsonl_for_train is None: + raise ValueError("Specify either `--dataset_name` or `--jsonl_for_train`") - # if args.dataset_name is not None and args.train_data_dir is not None: - # raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`") + if args.dataset_name is not None and args.jsonl_for_train is not None: + raise ValueError("Specify only one of `--dataset_name` or `--jsonl_for_train`") if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") @@ -719,11 +762,11 @@ def collate_fn(examples): def main(args): - # if args.report_to == "wandb" and args.hub_token is not None: - # raise ValueError( - # "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." - # " Please use `huggingface-cli login` to authenticate with the Hub." - # ) + if args.report_to == "wandb" and args.hub_token is not None: + raise ValueError( + "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." + " Please use `huggingface-cli login` to authenticate with the Hub." + ) logging_out_dir = Path(args.output_dir, args.logging_dir) @@ -1025,9 +1068,8 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=100 ) - del text_encoders, tokenizers - gc.collect() - torch.cuda.empty_cache() + + clear_objs_and_retain_memory([text_encoders, tokenizers]) # Then get the training dataset ready to be passed to the dataloader. 
train_dataset = prepare_train_dataset(train_dataset, accelerator) @@ -1137,28 +1179,6 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline disable=not accelerator.is_local_main_process, ) - # copied from pipeline_flux_controlnet - def _prepare_latent_image_ids(batch_size, height, width, device, dtype): - latent_image_ids = torch.zeros(height // 2, width // 2, 3) - latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] - latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] - - latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape - - latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1) - latent_image_ids = latent_image_ids.reshape( - batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels - ) - - return latent_image_ids.to(device=device, dtype=dtype) - - def _pack_latents(latents, batch_size, num_channels_latents, height, width): - latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) - latents = latents.permute(0, 2, 4, 1, 3, 5) - latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) - - return latents - image_logs = None for epoch in range(first_epoch, args.num_train_epochs): for step, batch in enumerate(train_dataloader): @@ -1169,7 +1189,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width): pixel_values = batch["pixel_values"].to(dtype=weight_dtype) pixel_latents_tmp = vae.encode(pixel_values).latent_dist.sample() pixel_latents_tmp = (pixel_latents_tmp - vae.config.shift_factor) * vae.config.scaling_factor - pixel_latents = _pack_latents( + pixel_latents = FluxControlNetPipeline._pack_latents( pixel_latents_tmp, pixel_values.shape[0], pixel_latents_tmp.shape[1], @@ -1180,7 +1200,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width): control_values = batch["conditioning_pixel_values"].to(dtype=weight_dtype) control_latents = vae.encode(control_values).latent_dist.sample() control_latents = (control_latents - vae.config.shift_factor) * vae.config.scaling_factor - control_image = _pack_latents( + control_image = FluxControlNetPipeline._pack_latents( control_latents, control_values.shape[0], control_latents.shape[1], @@ -1188,7 +1208,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width): control_latents.shape[3], ) - latent_image_ids = _prepare_latent_image_ids( + latent_image_ids = FluxControlNetPipeline._prepare_latent_image_ids( batch_size=pixel_latents_tmp.shape[0], height=pixel_latents_tmp.shape[2], width=pixel_latents_tmp.shape[3], @@ -1222,7 +1242,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width): pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype), encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype), txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype), - img_ids=latent_image_ids[0], + img_ids=latent_image_ids, return_dict=False, ) @@ -1241,7 +1261,7 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width): if controlnet_single_block_samples is not None else None, txt_ids=batch["unet_added_conditions"]["time_ids"][0].to(dtype=weight_dtype), - img_ids=latent_image_ids[0], + img_ids=latent_image_ids, return_dict=False, )[0] @@ -1311,7 +1331,13 @@ def _pack_latents(latents, batch_size, num_channels_latents, 
height, width): accelerator.wait_for_everyone() if accelerator.is_main_process: flux_controlnet = unwrap_model(flux_controlnet) - flux_controlnet.save_pretrained(args.output_dir) + save_weight_dtype = torch.float32 + if args.save_weight_dtype == "fp16": + save_weight_dtype = torch.float16 + elif args.save_weight_dtype == "bf16": + save_weight_dtype = torch.bfloat16 + flux_controlnet.to(save_weight_dtype) + flux_controlnet.save_pretrained(args.output_dir,variant=args.save_weight_dtype) # Run a final round of validation. # Setting `vae`, `unet`, and `controlnet` to None to load automatically from `args.output_dir`. @@ -1327,6 +1353,22 @@ def _pack_latents(latents, batch_size, num_channels_latents, height, width): step=global_step, is_final_validation=True, ) + + if args.push_to_hub: + save_model_card( + repo_id, + image_logs=image_logs, + base_model=args.pretrained_model_name_or_path, + repo_folder=args.output_dir, + ) + + upload_folder( + repo_id=repo_id, + folder_path=args.output_dir, + commit_message="End of training", + ignore_patterns=["step_*", "epoch_*"], + ) + accelerator.end_training() From bc68f1a770d969c2265ad77d19f5cc417d597c4e Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 15 Sep 2024 23:09:48 -0400 Subject: [PATCH 15/40] add push to hub in readme --- examples/controlnet/README_flux.md | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md index 6b9eb33184da..3887f6dfb729 100644 --- a/examples/controlnet/README_flux.md +++ b/examples/controlnet/README_flux.md @@ -94,6 +94,7 @@ accelerate launch train_controlnet_flux.py \ --num_double_layers=4 \ --num_single_layers=0 \ --seed=42 \ + --push_to_hub \ ``` To better track our training experiments, we're using the following flags in the command above: From fe2a58715e8be8d2173f6aa34952c8afc4c5eb64 Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 16 Sep 2024 05:32:08 -0400 Subject: [PATCH 16/40] apply weighting schemes --- examples/controlnet/train_controlnet_flux.py | 79 +++++++++++++++----- 1 file changed, 59 insertions(+), 20 deletions(-) diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py index c2e95472b114..0cf322a80141 100644 --- a/examples/controlnet/train_controlnet_flux.py +++ b/examples/controlnet/train_controlnet_flux.py @@ -23,7 +23,7 @@ import shutil from contextlib import nullcontext from pathlib import Path - +import copy import accelerate import numpy as np import torch @@ -58,7 +58,7 @@ from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_torch_npu_available, is_xformers_available from diffusers.utils.torch_utils import is_compiled_module -from diffusers.training_utils import clear_objs_and_retain_memory +from diffusers.training_utils import clear_objs_and_retain_memory, compute_density_for_timestep_sampling if is_wandb_available(): import wandb @@ -218,12 +218,16 @@ def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=N These are controlnet weights trained on {base_model} with new type of conditioning. 
{img_str} + +## License + +Please adhere to the licensing terms as described [here](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md) """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, - license="openrail++", + license="other", base_model=base_model, model_description=model_description, inference=True, @@ -604,6 +608,27 @@ def parse_args(input_args=None): ), ) + parser.add_argument( + "--weighting_scheme", + type=str, + default="none", + choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"], + help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'), + ) + parser.add_argument( + "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme." + ) + parser.add_argument( + "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme." + ) + parser.add_argument( + "--mode_scale", + type=float, + default=1.29, + help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.", + ) + + if input_args is not None: args = parser.parse_args(input_args) else: @@ -874,7 +899,7 @@ def main(args): args.pretrained_model_name_or_path, subfolder="scheduler", ) - + noise_scheduler_copy = copy.deepcopy(noise_scheduler) vae.requires_grad_(False) flux_transformer.requires_grad_(False) text_encoder_one.requires_grad_(False) @@ -1179,12 +1204,22 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline disable=not accelerator.is_local_main_process, ) + def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): + sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype) + schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device) + timesteps = timesteps.to(accelerator.device) + step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < n_dim: + sigma = sigma.unsqueeze(-1) + return sigma + image_logs = None for epoch in range(first_epoch, args.num_train_epochs): for step, batch in enumerate(train_dataloader): with accelerator.accumulate(flux_controlnet): # Convert images to latent space - # vae encode pixel_values = batch["pixel_values"].to(dtype=weight_dtype) pixel_latents_tmp = vae.encode(pixel_values).latent_dist.sample() @@ -1216,28 +1251,32 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline dtype=pixel_values.dtype, ) - # Sample noise that we'll add to the latents - noise = torch.randn_like(pixel_latents).to(accelerator.device).to(dtype=weight_dtype) bsz = pixel_latents.shape[0] - + noise = torch.randn_like(pixel_latents).to(accelerator.device).to(dtype=weight_dtype) # Sample a random timestep for each image - t = torch.sigmoid(torch.randn((bsz,), device=accelerator.device, dtype=weight_dtype)) + # for weighting schemes where we sample timesteps non-uniformly + u = compute_density_for_timestep_sampling( + weighting_scheme=args.weighting_scheme, + batch_size=bsz, + logit_mean=args.logit_mean, + logit_std=args.logit_std, + mode_scale=args.mode_scale, + ) + indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() + timesteps = noise_scheduler_copy.timesteps[indices].to(device=pixel_latents.device) - # apply flow matching - noisy_latents = ( - 1 - t.unsqueeze(1).unsqueeze(2).repeat(1, pixel_latents.shape[1], pixel_latents.shape[2]) - ) * pixel_latents + 
t.unsqueeze(1).unsqueeze(2).repeat( - 1, pixel_latents.shape[1], pixel_latents.shape[2] - ) * noise + # Add noise according to flow matching. + sigmas = get_sigmas(timesteps, n_dim=pixel_latents.ndim, dtype=pixel_latents.dtype) + noisy_model_input = (1.0 - sigmas) * pixel_latents + sigmas * noise guidance_vec = torch.full( - (noisy_latents.shape[0],), args.guidance_scale, device=noisy_latents.device, dtype=weight_dtype + (noisy_model_input.shape[0],), args.guidance_scale, device=noisy_model_input.device, dtype=weight_dtype ) controlnet_block_samples, controlnet_single_block_samples = flux_controlnet( - hidden_states=noisy_latents, + hidden_states=noisy_model_input, controlnet_cond=control_image, - timestep=t, + timestep=timesteps / 1000, guidance=guidance_vec, pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype), encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype), @@ -1247,8 +1286,8 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline ) noise_pred = flux_transformer( - hidden_states=noisy_latents, - timestep=t, + hidden_states=noisy_model_input, + timestep=timesteps / 1000, guidance=guidance_vec, pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype), encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype), From 3dc16cac98ec7e0b42afae676648ec50dd4118a3 Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 16 Sep 2024 07:48:02 -0400 Subject: [PATCH 17/40] add note --- examples/controlnet/README_flux.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md index 3887f6dfb729..5901326a56f4 100644 --- a/examples/controlnet/README_flux.md +++ b/examples/controlnet/README_flux.md @@ -4,6 +4,13 @@ The `train_controlnet_flux.py` script shows how to implement the ControlNet trai Training script provided by LibAI, which is an institution dedicated to the progress and achievement of artificial general intelligence. LibAI is the developer of [cutout.pro](https://www.cutout.pro/) and [promeai.pro](https://www.promeai.pro/). +> [!NOTE] +> **Memory consumption** +> +> Flux can be quite expensive to run on consumer hardware devices and as a result, ControlNet training of it comes with higher memory requirements than usual. +> **Gated access** +> As the model is gated, before using it with diffusers you first need to go to the [FLUX.1 [dev] Hugging Face page](https://huggingface.co/black-forest-labs/FLUX.1-dev), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in: `huggingface-cli login` + ## Running locally with PyTorch ### Installing the dependencies From aff09514e377cccb997f84c6be95ecae04029dba Mon Sep 17 00:00:00 2001 From: PromeAI Date: Mon, 16 Sep 2024 19:51:27 +0800 Subject: [PATCH 18/40] Update examples/controlnet/README_flux.md Co-authored-by: Sayak Paul --- examples/controlnet/README_flux.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md index 5901326a56f4..fc8f3aec1fae 100644 --- a/examples/controlnet/README_flux.md +++ b/examples/controlnet/README_flux.md @@ -3,6 +3,13 @@ The `train_controlnet_flux.py` script shows how to implement the ControlNet training procedure and adapt it for [FLUX](https://github.com/black-forest-labs/flux). 
Training script provided by LibAI, which is an institution dedicated to the progress and achievement of artificial general intelligence. LibAI is the developer of [cutout.pro](https://www.cutout.pro/) and [promeai.pro](https://www.promeai.pro/). +> [!NOTE] +> **Memory consumption** +> +> Flux can be quite expensive to run on consumer hardware devices and as a result, ControlNet training of it comes with higher memory requirements than usual. + +> **Gated access** +> As the model is gated, before using it with diffusers you first need to go to the [FLUX.1 [dev] Hugging Face page](https://huggingface.co/black-forest-labs/FLUX.1-dev), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in: `huggingface-cli login` > [!NOTE] > **Memory consumption** From 7bdf9e3b8f0eb06affc0e3d859d44f9452edca39 Mon Sep 17 00:00:00 2001 From: Your Name Date: Wed, 18 Sep 2024 23:14:17 -0400 Subject: [PATCH 19/40] make code style and quality --- examples/controlnet/train_controlnet_flux.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py index 0cf322a80141..e0b7facbb997 100644 --- a/examples/controlnet/train_controlnet_flux.py +++ b/examples/controlnet/train_controlnet_flux.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and import argparse +import copy import functools import gc import logging @@ -23,7 +24,7 @@ import shutil from contextlib import nullcontext from pathlib import Path -import copy + import accelerate import numpy as np import torch @@ -54,11 +55,12 @@ from diffusers.models.controlnet_flux import FluxControlNetModel from diffusers.optimization import get_scheduler from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline +from diffusers.training_utils import clear_objs_and_retain_memory, compute_density_for_timestep_sampling from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_torch_npu_available, is_xformers_available from diffusers.utils.torch_utils import is_compiled_module -from diffusers.training_utils import clear_objs_and_retain_memory, compute_density_for_timestep_sampling + if is_wandb_available(): import wandb @@ -597,7 +599,7 @@ def parse_args(input_args=None): default=3.5, help="the guidance scale used for transformer.", ) - + parser.add_argument( "--save_weight_dtype", type=str, @@ -1214,7 +1216,7 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): while len(sigma.shape) < n_dim: sigma = sigma.unsqueeze(-1) return sigma - + image_logs = None for epoch in range(first_epoch, args.num_train_epochs): for step, batch in enumerate(train_dataloader): From c862d393b83d7ef3b5c9155b9f8a1e9e1142ed22 Mon Sep 17 00:00:00 2001 From: Your Name Date: Wed, 18 Sep 2024 23:52:40 -0400 Subject: [PATCH 20/40] fix some unnoticed error --- examples/controlnet/train_controlnet_flux.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py index e0b7facbb997..797fe9b04da8 100644 --- a/examples/controlnet/train_controlnet_flux.py +++ b/examples/controlnet/train_controlnet_flux.py @@ -123,12 +123,10 @@ def log_validation( ) image_logs = [] - if 
From c862d393b83d7ef3b5c9155b9f8a1e9e1142ed22 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Wed, 18 Sep 2024 23:52:40 -0400
Subject: [PATCH 20/40] fix some unnoticed error

---
 examples/controlnet/train_controlnet_flux.py | 18 +++++-------------
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index e0b7facbb997..797fe9b04da8 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -123,12 +123,10 @@ def log_validation(
     )
 
     image_logs = []
-    if is_final_validation or torch.backends.mps.is_available():
-        autocast_ctx = nullcontext()
-    else:
-        # t5 seems not support autocast and i don't know why
-        autocast_ctx = nullcontext()
-        # autocast_ctx = torch.autocast(accelerator.device.type)
+
+    # t5 seems not support autocast and i don't know why
+    autocast_ctx = nullcontext()
+    # autocast_ctx = torch.autocast(accelerator.device.type)
 
     for validation_prompt, validation_image in zip(validation_prompts, validation_images):
         from diffusers.utils import load_image
@@ -194,10 +192,7 @@ def log_validation(
         else:
             logger.warning(f"image logging not implemented for {tracker.name}")
 
-    del pipeline
-    gc.collect()
-    torch.cuda.empty_cache()
-
+    clear_objs_and_retain_memory([pipeline])
     return image_logs
@@ -1048,9 +1043,6 @@ def load_model_hook(models, input_dir):
     vae.to(accelerator.device, dtype=weight_dtype)
     flux_transformer.to(accelerator.device, dtype=weight_dtype)
-    text_encoder_one.to(accelerator.device, dtype=weight_dtype)
-    text_encoder_two.to(accelerator.device, dtype=weight_dtype)
-    # flux_controlnet.to(accelerator.device, dtype=weight_dtype)
 
     def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline, weight_dtype, is_train=True):
         prompt_batch = batch[args.caption_column]

From 4b979e0b95e6f543ff89933dae1190350dfe908d Mon Sep 17 00:00:00 2001
From: Your Name
Date: Wed, 18 Sep 2024 23:55:12 -0400
Subject: [PATCH 21/40] make code style and quality

---
 examples/controlnet/train_controlnet_flux.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 797fe9b04da8..d828b86f7c0d 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -16,7 +16,6 @@
 import argparse
 import copy
 import functools
-import gc
 import logging
 import math
 import os

From 90badc2955e648a8e34e032c80f0b859c4319105 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 19 Sep 2024 05:06:42 -0400
Subject: [PATCH 22/40] add example controlnet in readme

---
 examples/controlnet/README_flux.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index fc8f3aec1fae..6da3b9b3069c 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -129,7 +129,7 @@ from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipe
 from diffusers.models.controlnet_flux import FluxControlNetModel
 
 base_model = 'black-forest-labs/FLUX.1-dev'
-controlnet_model = 'path to controlnet'
+controlnet_model = 'promeai/FLUX.1-controlnet-lineart-promeai'
 controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
 pipe = FluxControlNetPipeline.from_pretrained(
     base_model,
     controlnet=controlnet,

From e3d10bc1ee4451d9f7696c3d9861628797ce3225 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 19 Sep 2024 05:28:56 -0400
Subject: [PATCH 23/40] add test controlnet

---
 examples/controlnet/test_controlnet.py       | 28 ++++++++++++++++++++
 examples/controlnet/train_controlnet_flux.py |  2 +-
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/examples/controlnet/test_controlnet.py b/examples/controlnet/test_controlnet.py
index 77b5614c7fb0..2cb5ac9e1f28 100644
--- a/examples/controlnet/test_controlnet.py
+++ b/examples/controlnet/test_controlnet.py
@@ -136,3 +136,31 @@ def test_controlnet_sd3(self):
             run_command(self._launch_args + test_args)
 
             self.assertTrue(os.path.isfile(os.path.join(tmpdir, "diffusion_pytorch_model.safetensors")))
+
+
+class ControlNetflux(ExamplesTestsAccelerate):
+    def test_controlnet_flux(self):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            test_args = f"""
+            examples/controlnet/train_controlnet_flux.py
+            --pretrained_model_name_or_path=black-forest-labs/FLUX.1-dev
+            --output_dir={tmpdir}
+            --controlnet_model_name_or_path=promeai/FLUX.1-controlnet-lineart-promeai
+            --dataset_name=hf-internal-testing/fill10
+            --conditioning_image_column=conditioning_image
+            --image_column=image
+            --caption_column=text
+            --resolution=64
+            --train_batch_size=1
+            --gradient_accumulation_steps=1
+            --max_train_steps=4
+            --checkpointing_steps=2
+            --num_double_layers=4
+            --num_single_layers=0
+            """.split()
+
+            run_command(self._launch_args + test_args)
+
+            self.assertTrue(os.path.isfile(os.path.join(tmpdir, "diffusion_pytorch_model.safetensors")))
+
+
diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index d828b86f7c0d..89c8c7cfdda5 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -607,7 +607,7 @@ def parse_args(input_args=None):
     parser.add_argument(
         "--weighting_scheme",
         type=str,
-        default="none",
+        default="logit_normal",
         choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
         help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'),
     )

From f9400a6f46bc8604f6f108611912d455b0108ee7 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 19 Sep 2024 05:32:44 -0400
Subject: [PATCH 24/40] Remove duplicate notes

---
 examples/controlnet/README_flux.md | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index 6da3b9b3069c..3970f37fb9d5 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -11,12 +11,6 @@ Training script provided by LibAI, which is an institution dedicated to the prog
 > **Gated access**
 > As the model is gated, before using it with diffusers you first need to go to the [FLUX.1 [dev] Hugging Face page](https://huggingface.co/black-forest-labs/FLUX.1-dev), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in: `huggingface-cli login`
 
-> [!NOTE]
-> **Memory consumption**
->
-> Flux can be quite expensive to run on consumer hardware devices and as a result, ControlNet training of it comes with higher memory requirements than usual.
-> **Gated access**
-> As the model is gated, before using it with diffusers you first need to go to the [FLUX.1 [dev] Hugging Face page](https://huggingface.co/black-forest-labs/FLUX.1-dev), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in: `huggingface-cli login`
 
 ## Running locally with PyTorch
From de06965c6edf4f596222bd72d8a74940922e5746 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 19 Sep 2024 05:40:17 -0400
Subject: [PATCH 25/40] Fix formatting errors

---
 examples/controlnet/README_flux.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index 3970f37fb9d5..0cc5a18ccc12 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -9,6 +9,7 @@ Training script provided by LibAI, which is an institution dedicated to the prog
 > Flux can be quite expensive to run on consumer hardware devices and as a result, ControlNet training of it comes with higher memory requirements than usual.
 
 > **Gated access**
+>
 > As the model is gated, before using it with diffusers you first need to go to the [FLUX.1 [dev] Hugging Face page](https://huggingface.co/black-forest-labs/FLUX.1-dev), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in: `huggingface-cli login`

From 17fc1ee3cf78a90b6b07a6d1ac5bbb2a85a08981 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 20 Sep 2024 01:32:23 -0400
Subject: [PATCH 26/40] add new control image

---
 examples/controlnet/README_flux.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/controlnet/README_flux.md b/examples/controlnet/README_flux.md
index 0cc5a18ccc12..d8be36a6e17a 100644
--- a/examples/controlnet/README_flux.md
+++ b/examples/controlnet/README_flux.md
@@ -134,8 +134,8 @@ pipe = FluxControlNetPipeline.from_pretrained(
 # enable memory optimizations
 pipe.enable_model_cpu_offload()
 
-control_image = load_image("./conditioning_image_1.png").resize((1024, 1024))
-prompt = "pale golden rod circle with old lace background"
+control_image = load_image("https://huggingface.co/promeai/FLUX.1-controlnet-lineart-promeai/resolve/main/images/example-control.jpg").resize((1024, 1024))
+prompt = "cute anime girl with massive fluffy fennec ears and a big fluffy tail blonde messy long hair blue eyes wearing a maid outfit with a long black gold leaf pattern dress and a white apron mouth open holding a fancy black forest cake with candles on top in the kitchen of an old dark Victorian mansion lit by candlelight with a bright window to the foggy forest and very expensive stuff everywhere"
 
 image = pipe(
     prompt,
From b533cae596975a8d6a8590c409484d26d07aac85 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sun, 22 Sep 2024 23:06:49 -0400
Subject: [PATCH 27/40] add model cpu offload

---
 examples/controlnet/train_controlnet_flux.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 89c8c7cfdda5..8fe1f5489e85 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -54,6 +54,7 @@
 from diffusers.models.controlnet_flux import FluxControlNetModel
 from diffusers.optimization import get_scheduler
 from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline
+from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
 from diffusers.training_utils import clear_objs_and_retain_memory, compute_density_for_timestep_sampling
 from diffusers.utils import check_min_version, is_wandb_available, make_image_grid
 from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
@@ -95,8 +96,7 @@ def log_validation(
         torch_dtype=torch.bfloat16,
     )
 
-    # pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipeline.scheduler.config)
-    pipeline = pipeline.to(accelerator.device)
+    pipeline.to(accelerator.device)
     pipeline.set_progress_bar_config(disable=True)
 
     if args.enable_xformers_memory_efficient_attention:
@@ -623,6 +623,11 @@ def parse_args(input_args=None):
         default=1.29,
         help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
     )
+    parser.add_argument(
+        "--enable_model_cpu_offload",
+        action="store_true",
+        help="Enable model cpu offload and save memory.",
+    )
 
     if input_args is not None:
@@ -912,7 +917,11 @@ def main(args):
         tokenizer_2=tokenizer_two,
         transformer=flux_transformer,
         controlnet=flux_controlnet,
-    ).to(accelerator.device)
+    )
+    if args.enable_model_cpu_offload:
+        flux_controlnet_pipeline.enable_model_cpu_offload()
+    else:
+        flux_controlnet_pipeline.to(accelerator.device)
 
     def unwrap_model(model):
         model = accelerator.unwrap_model(model)
@@ -1064,7 +1073,6 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline
 
         # text_ids [512,3] to [bs,512,3]
         text_ids = text_ids.unsqueeze(0).expand(prompt_embeds.shape[0], -1, -1)
-        # unet_added_cond_kwargs = {"pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids}
         return {"prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "text_ids": text_ids}
 
     train_dataset = get_train_dataset(args, accelerator)
@@ -1083,10 +1091,9 @@ def compute_embeddings(batch, proportion_empty_prompts, flux_controlnet_pipeline
         # details: https://github.com/huggingface/diffusers/pull/4038#discussion_r1266078401
         new_fingerprint = Hasher.hash(args)
         train_dataset = train_dataset.map(
-            compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=100
+            compute_embeddings_fn, batched=True, new_fingerprint=new_fingerprint, batch_size=50
         )
-
     clear_objs_and_retain_memory([text_encoders, tokenizers])
 
     # Then get the training dataset ready to be passed to the dataloader.

From 4d7c1afb395cebd02f9c7cea8b2830927dd333e0 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sun, 22 Sep 2024 23:22:35 -0400
Subject: [PATCH 28/40] update help for adafactor

---
 examples/controlnet/train_controlnet_flux.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 8fe1f5489e85..1c0f933f390f 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -399,7 +399,13 @@ def parse_args(input_args=None):
         "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
     )
     parser.add_argument(
-        "--use_adafactor", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
+        "--use_adafactor",
+        action="store_true",
+        help=(
+            "Adafactor is a stochastic optimization method based on Adam that reduces memory usage while retaining"
+            "the empirical benefits of adaptivity. This is achieved through maintaining a factored representation "
+            "of the squared gradient accumulator across training steps."
+        ),
     )
     parser.add_argument(
         "--dataloader_num_workers",
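For readers of the new `--use_adafactor` help text: the factored second-moment state is what keeps optimizer memory roughly proportional to the parameter count rather than double it. A hedged sketch of constructing such an optimizer with the `transformers` implementation; the fixed-lr settings are assumptions for illustration, not this script's exact wiring:

```python
from transformers import Adafactor

# flux_controlnet is assumed to be the FluxControlNetModel being trained.
optimizer = Adafactor(
    flux_controlnet.parameters(),
    lr=1e-5,
    scale_parameter=False,  # use the externally supplied learning rate as-is
    relative_step=False,    # disable Adafactor's internal lr schedule
    warmup_init=False,
)
```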
From 49a1492054a4c07ef3e56e1e9790ed8e5d1641da Mon Sep 17 00:00:00 2001
From: Your Name
Date: Tue, 24 Sep 2024 03:04:43 -0400
Subject: [PATCH 29/40] make quality & style

---
 examples/controlnet/train_controlnet_flux.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 1c0f933f390f..6cb0afd1dbdc 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -54,7 +54,6 @@
 from diffusers.models.controlnet_flux import FluxControlNetModel
 from diffusers.optimization import get_scheduler
 from diffusers.pipelines.flux.pipeline_flux_controlnet import FluxControlNetPipeline
-from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
 from diffusers.training_utils import clear_objs_and_retain_memory, compute_density_for_timestep_sampling
 from diffusers.utils import check_min_version, is_wandb_available, make_image_grid
 from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
From d895b8ff1f552b5deb6519e626f8d41bf60208d4 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Tue, 24 Sep 2024 04:27:50 -0400
Subject: [PATCH 30/40] make quality and style

---
 examples/controlnet/test_controlnet.py       |  2 --
 examples/controlnet/train_controlnet_flux.py | 33 +++++++++++--------
 .../flux/pipeline_flux_controlnet.py         | 12 +++++--
 3 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/examples/controlnet/test_controlnet.py b/examples/controlnet/test_controlnet.py
index 2cb5ac9e1f28..5e3398ee312e 100644
--- a/examples/controlnet/test_controlnet.py
+++ b/examples/controlnet/test_controlnet.py
@@ -162,5 +162,3 @@ def test_controlnet_flux(self):
             run_command(self._launch_args + test_args)
 
             self.assertTrue(os.path.isfile(os.path.join(tmpdir, "diffusion_pytorch_model.safetensors")))
-
-
diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 6cb0afd1dbdc..424b6486d4f3 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -87,7 +87,9 @@ def log_validation(
             torch_dtype=torch.bfloat16,
         )
     else:
-        flux_controlnet = FluxControlNetModel.from_pretrained(args.output_dir, torch_dtype=torch.bfloat16, variant=args.save_weight_dtype)
+        flux_controlnet = FluxControlNetModel.from_pretrained(
+            args.output_dir, torch_dtype=torch.bfloat16, variant=args.save_weight_dtype
+        )
         pipeline = FluxControlNetPipeline.from_pretrained(
             args.pretrained_model_name_or_path,
             controlnet=flux_controlnet,
@@ -240,6 +242,7 @@ def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=N
 
     model_card.save(os.path.join(repo_folder, "README.md"))
 
+
 def parse_args(input_args=None):
     parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.")
     parser.add_argument(
@@ -402,8 +405,8 @@ def parse_args(input_args=None):
         action="store_true",
         help=(
             "Adafactor is a stochastic optimization method based on Adam that reduces memory usage while retaining"
-            "the empirical benefits of adaptivity. This is achieved through maintaining a factored representation "
-            "of the squared gradient accumulator across training steps." 
+            "the empirical benefits of adaptivity. This is achieved through maintaining a factored representation "
+            "of the squared gradient accumulator across training steps."
         ),
     )
     parser.add_argument(
@@ -601,12 +604,14 @@ def parse_args(input_args=None):
 
     parser.add_argument(
         "--save_weight_dtype",
-        type=str, 
+        type=str,
         default="fp32",
-        choices=["fp16", "bf16", "fp32",],
-        help=(
-            "Preserve precision type according to selected weight"
-        ),
+        choices=[
+            "fp16",
+            "bf16",
+            "fp32",
+        ],
+        help=("Preserve precision type according to selected weight"),
     )
 
     parser.add_argument(
@@ -634,7 +639,6 @@ def parse_args(input_args=None):
         help="Enable model cpu offload and save memory.",
     )
 
-
     if input_args is not None:
         args = parser.parse_args(input_args)
     else:
@@ -1275,13 +1279,16 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
             noisy_model_input = (1.0 - sigmas) * pixel_latents + sigmas * noise
 
             guidance_vec = torch.full(
-                (noisy_model_input.shape[0],), args.guidance_scale, device=noisy_model_input.device, dtype=weight_dtype
+                (noisy_model_input.shape[0],),
+                args.guidance_scale,
+                device=noisy_model_input.device,
+                dtype=weight_dtype,
             )
 
             controlnet_block_samples, controlnet_single_block_samples = flux_controlnet(
                 hidden_states=noisy_model_input,
                 controlnet_cond=control_image,
-                timestep=timesteps / 1000, 
+                timestep=timesteps / 1000,
                 guidance=guidance_vec,
                 pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype),
                 encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype),
@@ -1292,7 +1299,7 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
 
             noise_pred = flux_transformer(
                 hidden_states=noisy_model_input,
-                timestep=timesteps / 1000, 
+                timestep=timesteps / 1000,
                 guidance=guidance_vec,
                 pooled_projections=batch["unet_added_conditions"]["pooled_prompt_embeds"].to(dtype=weight_dtype),
                 encoder_hidden_states=batch["prompt_ids"].to(dtype=weight_dtype),
@@ -1381,7 +1388,7 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
     elif args.save_weight_dtype == "bf16":
         save_weight_dtype = torch.bfloat16
     flux_controlnet.to(save_weight_dtype)
-    flux_controlnet.save_pretrained(args.output_dir,variant=args.save_weight_dtype)
+    flux_controlnet.save_pretrained(args.output_dir, variant=args.save_weight_dtype)
 
     # Run a final round of validation.
     # Setting `vae`, `unet`, and `controlnet` to None to load automatically from `args.output_dir`.
diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
index 218b2eb7ded1..0ee5313d9365 100644
--- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
+++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
@@ -861,8 +861,16 @@ def __call__(
                     joint_attention_kwargs=self.joint_attention_kwargs,
                     return_dict=False,
                 )
                 # ensure dtype
-                processed_controlnet_block_samples = [sample.to(dtype=latents.dtype) for sample in controlnet_block_samples] if controlnet_block_samples is not None else None
-                processed_controlnet_single_block_samples = [sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples] if controlnet_single_block_samples is not None else None
+                processed_controlnet_block_samples = (
+                    [sample.to(dtype=latents.dtype) for sample in controlnet_block_samples]
+                    if controlnet_block_samples is not None
+                    else None
+                )
+                processed_controlnet_single_block_samples = (
+                    [sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples]
+                    if controlnet_single_block_samples is not None
+                    else None
+                )
 
                 guidance = (
                     torch.tensor([guidance_scale], device=device) if self.transformer.config.guidance_embeds else None
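The context lines above show the core flow-matching step the whole script trains against. For readers skimming the diffs, a self-contained sketch of that noising with stand-in tensors (not the script's actual variables):

```python
import torch

# Stand-ins for the script's VAE latents and per-sample scheduler sigmas.
batch_size = 4
pixel_latents = torch.randn(batch_size, 16, 32, 32)
noise = torch.randn_like(pixel_latents)
sigmas = torch.rand(batch_size, 1, 1, 1)  # broadcastable, as get_sigmas returns

# Linear interpolation between clean latents (sigma=0) and pure noise (sigma=1);
# the transformer then learns to predict the velocity (noise - latents).
noisy_model_input = (1.0 - sigmas) * pixel_latents + sigmas * noise
target = noise - pixel_latents
```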
From b6a90211bdab911209f82505aa7a65330065aa2f Mon Sep 17 00:00:00 2001
From: Your Name
Date: Tue, 24 Sep 2024 05:59:43 -0400
Subject: [PATCH 31/40] rename flux_controlnet_model_name_or_path

---
 examples/controlnet/train_controlnet_flux.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 424b6486d4f3..ee7870dd17ea 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -259,7 +259,7 @@ def parse_args(input_args=None):
         help="Path to an improved VAE to stabilize training. For more details check out: https://github.com/huggingface/diffusers/pull/4038.",
     )
     parser.add_argument(
-        "--flux_controlnet_model_name_or_path",
+        "--controlnet_model_name_or_path",
         type=str,
         default=None,
         help="Path to pretrained controlnet model or model identifier from huggingface.co/models."
@@ -891,9 +891,9 @@ def main(args):
         revision=args.revision,
         variant=args.variant,
     )
-    if args.flux_controlnet_model_name_or_path:
+    if args.controlnet_model_name_or_path:
         logger.info("Loading existing controlnet weights")
-        flux_controlnet = FluxControlNetModel.from_pretrained(args.flux_controlnet_model_name_or_path)
+        flux_controlnet = FluxControlNetModel.from_pretrained(args.controlnet_model_name_or_path)
     else:
         logger.info("Initializing controlnet weights from transformer")
         # we can define the num_layers, num_single_layers,
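A hedged usage sketch of the renamed flag's load path: point it at an existing checkpoint (the lineart model referenced elsewhere in this series) to fine-tune instead of initializing from the transformer:

```python
from diffusers.models.controlnet_flux import FluxControlNetModel

# Equivalent of --controlnet_model_name_or_path=promeai/FLUX.1-controlnet-lineart-promeai
flux_controlnet = FluxControlNetModel.from_pretrained(
    "promeai/FLUX.1-controlnet-lineart-promeai"
)
```

Left unset, the script falls back to `FluxControlNetModel.from_transformer`, as the hunk above shows.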
From b097d0d61a60ab066bd7e8fae3f38251d9023d6a Mon Sep 17 00:00:00 2001
From: Your Name
Date: Tue, 24 Sep 2024 22:04:57 -0400
Subject: [PATCH 32/40] fix back src/diffusers/pipelines/flux/pipeline_flux_controlnet.py

---
 .../pipelines/flux/pipeline_flux_controlnet.py | 17 +++--------------
 1 file changed, 3 insertions(+), 14 deletions(-)

diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
index 0ee5313d9365..fbd6b4ad1b34 100644
--- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
+++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
@@ -860,17 +860,6 @@ def __call__(
                     joint_attention_kwargs=self.joint_attention_kwargs,
                     return_dict=False,
                 )
-                # ensure dtype
-                processed_controlnet_block_samples = (
-                    [sample.to(dtype=latents.dtype) for sample in controlnet_block_samples]
-                    if controlnet_block_samples is not None
-                    else None
-                )
-                processed_controlnet_single_block_samples = (
-                    [sample.to(dtype=latents.dtype) for sample in controlnet_single_block_samples]
-                    if controlnet_single_block_samples is not None
-                    else None
-                )
 
                 guidance = (
                     torch.tensor([guidance_scale], device=device) if self.transformer.config.guidance_embeds else None
@@ -883,8 +872,8 @@ def __call__(
                     guidance=guidance,
                     pooled_projections=pooled_prompt_embeds,
                     encoder_hidden_states=prompt_embeds,
-                    controlnet_block_samples=processed_controlnet_block_samples,
-                    controlnet_single_block_samples=processed_controlnet_single_block_samples,
+                    controlnet_block_samples=controlnet_block_samples,
+                    controlnet_single_block_samples=controlnet_single_block_samples,
                     txt_ids=text_ids,
                     img_ids=latent_image_ids,
                     joint_attention_kwargs=self.joint_attention_kwargs,
@@ -932,4 +921,4 @@ def __call__(
         if not return_dict:
             return (image,)
 
-        return FluxPipelineOutput(images=image)
+        return FluxPipelineOutput(images=image)
\ No newline at end of file
From 49787e30763e49fbed7c8c698ee39823d8a51cb7 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 26 Sep 2024 01:17:19 -0400
Subject: [PATCH 33/40] fix dtype error by pre calculate text emb

---
 examples/controlnet/train_controlnet_flux.py | 22 ++++++-----
 .../flux/pipeline_flux_controlnet.py         | 39 ++++++++++++-------
 2 files changed, 37 insertions(+), 24 deletions(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index ee7870dd17ea..a6ba3c09a534 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -79,7 +79,6 @@ def log_validation(
 
     if not is_final_validation:
         flux_controlnet = accelerator.unwrap_model(flux_controlnet)
-
         pipeline = FluxControlNetPipeline.from_pretrained(
             args.pretrained_model_name_or_path,
             controlnet=flux_controlnet,
@@ -123,27 +122,30 @@ def log_validation(
     )
 
     image_logs = []
-
-    # t5 seems not support autocast and i don't know why
-    autocast_ctx = nullcontext()
-    # autocast_ctx = torch.autocast(accelerator.device.type)
+    if is_final_validation or torch.backends.mps.is_available():
+        autocast_ctx = nullcontext()
+    else:
+        autocast_ctx = torch.autocast(accelerator.device.type)
 
     for validation_prompt, validation_image in zip(validation_prompts, validation_images):
         from diffusers.utils import load_image
 
         validation_image = load_image(validation_image)
-        # need to inference on 1024 to get a good image
-        validation_image = validation_image.resize((1024, 1024))
-        # validation_image = validation_image.resize((args.resolution, args.resolution))
+        # maybe need to inference on 1024 to get a good image
+        validation_image = validation_image.resize((args.resolution, args.resolution))
 
         images = []
+        # pre calculate prompt embeds, pooled prompt embeds, text ids because t5 does not support autocast
+        prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt(
+            validation_prompt, prompt_2=validation_prompt
+        )
         for _ in range(args.num_validation_images):
             with autocast_ctx:
-                # need to fix in pipeline_flux_controlnet
                 image = pipeline(
-                    prompt=validation_prompt,
+                    prompt_embeds=prompt_embeds,
+                    pooled_prompt_embeds=pooled_prompt_embeds,
                     control_image=validation_image,
                     num_inference_steps=28,
                     controlnet_conditioning_scale=0.7,
diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
index fbd6b4ad1b34..e5fe83f4517b 100644
--- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
+++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
@@ -747,10 +747,12 @@ def __call__(
                 width_control_image,
             )
 
-            # set control mode
+            # Here we ensure that `control_mode` has the same length as the control_image.
             if control_mode is not None:
+                if not isinstance(control_mode, int):
+                    raise ValueError(" For `FluxControlNet`, `control_mode` should be an `int` or `None`")
                 control_mode = torch.tensor(control_mode).to(device, dtype=torch.long)
-                control_mode = control_mode.reshape([-1, 1])
+                control_mode = control_mode.view(-1, 1).expand(control_image.shape[0], 1)
 
         elif isinstance(self.controlnet, FluxMultiControlNetModel):
             control_images = []
@@ -785,16 +787,22 @@ def __call__(
 
             control_image = control_images
 
+            # Here we ensure that `control_mode` has the same length as the control_image.
+            if isinstance(control_mode, list) and len(control_mode) != len(control_image):
+                raise ValueError(
+                    "For Multi-ControlNet, `control_mode` must be a list of the same "
+                    + " length as the number of controlnets (control images) specified"
+                )
+            if not isinstance(control_mode, list):
+                control_mode = [control_mode] * len(control_image)
             # set control mode
-            control_mode_ = []
-            if isinstance(control_mode, list):
-                for cmode in control_mode:
-                    if cmode is None:
-                        control_mode_.append(-1)
-                    else:
-                        control_mode_.append(cmode)
-            control_mode = torch.tensor(control_mode_).to(device, dtype=torch.long)
-            control_mode = control_mode.reshape([-1, 1])
+            control_modes = []
+            for cmode in control_mode:
+                if cmode is None:
+                    cmode = -1
+                control_mode = torch.tensor(cmode).expand(control_images[0].shape[0]).to(device, dtype=torch.long)
+                control_modes.append(control_mode)
+            control_mode = control_modes
 
         # 4. Prepare latent variables
         num_channels_latents = self.transformer.config.in_channels // 4
@@ -840,9 +848,12 @@ def __call__(
             # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
             timestep = t.expand(latents.shape[0]).to(latents.dtype)
 
-            guidance = (
-                torch.tensor([guidance_scale], device=device) if self.controlnet.config.guidance_embeds else None
-            )
+            if isinstance(self.controlnet, FluxMultiControlNetModel):
+                use_guidance = self.controlnet.nets[0].config.guidance_embeds
+            else:
+                use_guidance = self.controlnet.config.guidance_embeds
+
+            guidance = torch.tensor([guidance_scale], device=device) if use_guidance else None
             guidance = guidance.expand(latents.shape[0]) if guidance is not None else None
 
             # controlnet
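The pattern this patch lands on, encoding once in full precision and autocasting only the denoising loop, generalizes to any scripted inference. A hedged sketch assuming a loaded `pipeline` and a preprocessed PIL `control_image`:

```python
import torch

prompt = "a red circle on a white background"

# Run both text encoders (CLIP + T5) outside autocast; per this example's
# notes, T5 under bf16 autocast can produce black images.
prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt(
    prompt, prompt_2=prompt
)

with torch.autocast("cuda", dtype=torch.bfloat16):
    image = pipeline(
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        control_image=control_image,
        num_inference_steps=28,
        guidance_scale=3.5,
    ).images[0]
```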
From e9d3e0499b62c64c68c60d7a17b73eeb9870e462 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 26 Sep 2024 01:25:13 -0400
Subject: [PATCH 34/40] rm image save

---
 examples/controlnet/train_controlnet_flux.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index a6ba3c09a534..b74075c6a1a2 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -152,7 +152,6 @@ def log_validation(
                     guidance_scale=3.5,
                     generator=generator,
                 ).images[0]
-                image.save("image.jpg")
                 images.append(image)
 
         image_logs.append(
             {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}

From 25fc313e4814b8918270280de6cc93be5e711eec Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 26 Sep 2024 05:15:29 -0400
Subject: [PATCH 35/40] quality fix

---
 src/diffusers/pipelines/flux/pipeline_flux_controlnet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
index e5fe83f4517b..6c072c482020 100644
--- a/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
+++ b/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py
@@ -932,4 +932,4 @@ def __call__(
         if not return_dict:
             return (image,)
 
-        return FluxPipelineOutput(images=image)
\ No newline at end of file
+        return FluxPipelineOutput(images=image)

From 2ee67c4751a6ff4ba57cac78d4aaa750596197c6 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 26 Sep 2024 22:00:58 -0400
Subject: [PATCH 36/40] fix test

---
 examples/controlnet/test_controlnet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/test_controlnet.py b/examples/controlnet/test_controlnet.py
index 5e3398ee312e..7cc11ea20270 100644
--- a/examples/controlnet/test_controlnet.py
+++ b/examples/controlnet/test_controlnet.py
@@ -143,7 +143,7 @@ def test_controlnet_flux(self):
         with tempfile.TemporaryDirectory() as tmpdir:
             test_args = f"""
             examples/controlnet/train_controlnet_flux.py
-            --pretrained_model_name_or_path=black-forest-labs/FLUX.1-dev
+            --pretrained_model_name_or_path=hf-internal-testing/tiny-flux-pipe
             --output_dir={tmpdir}
             --controlnet_model_name_or_path=promeai/FLUX.1-controlnet-lineart-promeai
             --dataset_name=hf-internal-testing/fill10
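On the caller side, the stricter `control_mode` contract introduced in patch 33 above means multi-ControlNet users pass one mode per control image. A hedged usage sketch, assuming a `pipe` built around a `FluxMultiControlNetModel` and two hypothetical preprocessed images `canny_image` and `depth_image`:

```python
images = pipe(
    prompt,
    control_image=[canny_image, depth_image],
    control_mode=[0, 2],  # one mode per controlnet; None entries map to -1 internally
    controlnet_conditioning_scale=[0.6, 0.5],
    num_inference_steps=28,
    guidance_scale=3.5,
).images
```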
From 7cedfb1fb455f51e2098dbdbc7dc49283ad78f2a Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 26 Sep 2024 23:27:14 -0400
Subject: [PATCH 37/40] fix tiny flux train error

---
 examples/controlnet/test_controlnet.py       |  5 ++---
 examples/controlnet/train_controlnet_flux.py | 21 +++++++++++---------
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/examples/controlnet/test_controlnet.py b/examples/controlnet/test_controlnet.py
index 7cc11ea20270..3c508f80f1a4 100644
--- a/examples/controlnet/test_controlnet.py
+++ b/examples/controlnet/test_controlnet.py
@@ -145,7 +145,6 @@ def test_controlnet_flux(self):
             examples/controlnet/train_controlnet_flux.py
             --pretrained_model_name_or_path=hf-internal-testing/tiny-flux-pipe
             --output_dir={tmpdir}
-            --controlnet_model_name_or_path=promeai/FLUX.1-controlnet-lineart-promeai
             --dataset_name=hf-internal-testing/fill10
             --conditioning_image_column=conditioning_image
             --image_column=image
@@ -155,8 +154,8 @@ def test_controlnet_flux(self):
             --gradient_accumulation_steps=1
             --max_train_steps=4
             --checkpointing_steps=2
-            --num_double_layers=4
-            --num_single_layers=0
+            --num_double_layers=1
+            --num_single_layers=1
             """.split()
 
             run_command(self._launch_args + test_args)
diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index b74075c6a1a2..5c5ac85dfbd7 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -862,14 +862,12 @@ def main(args):
         args.pretrained_model_name_or_path,
         subfolder="tokenizer",
         revision=args.revision,
-        use_fast=False,
     )
     # load t5 tokenizer
     tokenizer_two = AutoTokenizer.from_pretrained(
         args.pretrained_model_name_or_path,
         subfolder="tokenizer_2",
         revision=args.revision,
-        use_fast=False,
     )
     # load clip text encoder
     text_encoder_one = CLIPTextModel.from_pretrained(
@@ -898,9 +896,10 @@ def main(args):
     else:
         logger.info("Initializing controlnet weights from transformer")
         # we can define the num_layers, num_single_layers,
         flux_controlnet = FluxControlNetModel.from_transformer(
             flux_transformer,
+            attention_head_dim=flux_transformer.config["attention_head_dim"],
+            num_attention_heads=flux_transformer.config["num_attention_heads"],
             num_layers=args.num_double_layers,
             num_single_layers=args.num_single_layers,
         )
@@ -1279,12 +1278,16 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
             sigmas = get_sigmas(timesteps, n_dim=pixel_latents.ndim, dtype=pixel_latents.dtype)
             noisy_model_input = (1.0 - sigmas) * pixel_latents + sigmas * noise
 
-            guidance_vec = torch.full(
-                (noisy_model_input.shape[0],),
-                args.guidance_scale,
-                device=noisy_model_input.device,
-                dtype=weight_dtype,
-            )
+            # handle guidance
+            if flux_transformer.config.guidance_embeds:
+                guidance_vec = torch.full(
+                    (noisy_model_input.shape[0],),
+                    args.guidance_scale,
+                    device=noisy_model_input.device,
+                    dtype=weight_dtype,
+                )
+            else:
+                guidance_vec = None
 
             controlnet_block_samples, controlnet_single_block_samples = flux_controlnet(
                 hidden_states=noisy_model_input,

From dcac1b00d13214b94fff94eb56bc3781016d242b Mon Sep 17 00:00:00 2001
From: Your Name
Date: Thu, 26 Sep 2024 23:59:11 -0400
Subject: [PATCH 38/40] change report_to to tensorboard

---
 examples/controlnet/train_controlnet_flux.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 5c5ac85dfbd7..331c92c53a09 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -451,7 +451,7 @@ def parse_args(input_args=None):
     parser.add_argument(
         "--report_to",
         type=str,
-        default="wandb",
+        default="tensorboard",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
             ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
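The `from_transformer` change above is what lets arbitrarily sized base models work, including the tiny CI checkpoint. A hedged sketch of that initialization path outside the training script:

```python
from diffusers import FluxTransformer2DModel
from diffusers.models.controlnet_flux import FluxControlNetModel

transformer = FluxTransformer2DModel.from_pretrained(
    "hf-internal-testing/tiny-flux-pipe", subfolder="transformer"
)
# Copy the head geometry from the base model instead of relying on defaults,
# so a tiny transformer yields a tiny, shape-compatible ControlNet.
controlnet = FluxControlNetModel.from_transformer(
    transformer,
    attention_head_dim=transformer.config["attention_head_dim"],
    num_attention_heads=transformer.config["num_attention_heads"],
    num_layers=1,
    num_single_layers=1,
)
```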
From 89a1f353e02902e7bb740e93672b9750e6ae4444 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 27 Sep 2024 01:41:57 -0400
Subject: [PATCH 39/40] fix save name error when testing

---
 examples/controlnet/train_controlnet_flux.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 331c92c53a09..60afc4eb203e 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -1392,8 +1392,10 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
     elif args.save_weight_dtype == "bf16":
         save_weight_dtype = torch.bfloat16
     flux_controlnet.to(save_weight_dtype)
+    if args.save_weight_dtype != "fp32":
     flux_controlnet.save_pretrained(args.output_dir, variant=args.save_weight_dtype)
-
+    else:
+        flux_controlnet.save_pretrained(args.output_dir)
     # Run a final round of validation.
     # Setting `vae`, `unet`, and `controlnet` to None to load automatically from `args.output_dir`.
     image_logs = None

From 6ccd3e46c3d63572f0a0ae80226161ae210ec6c7 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Fri, 27 Sep 2024 02:02:29 -0400
Subject: [PATCH 40/40] Fix shrinking errors

---
 examples/controlnet/train_controlnet_flux.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/controlnet/train_controlnet_flux.py b/examples/controlnet/train_controlnet_flux.py
index 60afc4eb203e..e344a9b1e2a5 100644
--- a/examples/controlnet/train_controlnet_flux.py
+++ b/examples/controlnet/train_controlnet_flux.py
@@ -1392,10 +1392,10 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
     elif args.save_weight_dtype == "bf16":
         save_weight_dtype = torch.bfloat16
     flux_controlnet.to(save_weight_dtype)
-    if args.save_weight_dtype != "fp32":
-    flux_controlnet.save_pretrained(args.output_dir, variant=args.save_weight_dtype)
-    else:
-        flux_controlnet.save_pretrained(args.output_dir)
+    if args.save_weight_dtype != "fp32":
+        flux_controlnet.save_pretrained(args.output_dir, variant=args.save_weight_dtype)
+    else:
+        flux_controlnet.save_pretrained(args.output_dir)
     # Run a final round of validation.
     # Setting `vae`, `unet`, and `controlnet` to None to load automatically from `args.output_dir`.
     image_logs = None
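With the save path settled, reloading reduced-precision weights needs the matching variant. A hedged sketch; the output directory is hypothetical:

```python
import torch
from diffusers.models.controlnet_flux import FluxControlNetModel

# Weights saved with --save_weight_dtype=bf16 land in
# diffusion_pytorch_model.bf16.safetensors, so pass the same variant back.
flux_controlnet = FluxControlNetModel.from_pretrained(
    "path/to/output_dir", torch_dtype=torch.bfloat16, variant="bf16"
)
```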