Learning Diffusers

Notes on the Diffusers library

Diffusers is a powerful library for generative AI. These notes record some of the methods and techniques I picked up while studying Diffusers during my graduate studies.



1. Environment

Make sure your PyTorch version is newer than 1.7.0.
Install the Diffusers library directly in your virtual environment:
pip install diffusers
Running this command also pulls in some additional dependencies automatically.
You also need the accelerate library, installed the same way:
pip install accelerate
and the transformers library:
pip install transformers
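
A quick way to verify the setup is to print the installed versions (a minimal sketch; the exact numbers depend on your installation):

import torch
import diffusers
import transformers
import accelerate

# sanity-check the environment
print("torch:", torch.__version__)                # should be newer than 1.7.0
print("CUDA available:", torch.cuda.is_available())
print("diffusers:", diffusers.__version__)
print("transformers:", transformers.__version__)
print("accelerate:", accelerate.__version__)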

2. Generating images with DiffusionPipeline

Minimal example:

from diffusers import DiffusionPipeline # import the DiffusionPipeline from the Diffusers library
import torch # PyTorch

model_id = "runwayml/stable-diffusion-v1-5" # model id; I recommend downloading the model from the Hugging Face Hub and placing it in your local project directory
pipeline = DiffusionPipeline.from_pretrained(model_id) # load the pipeline

prompt = "a cute cat, eating carrots." # text prompt
pipeline = pipeline.to("cuda") # move the pipeline to the GPU


generator = torch.Generator("cuda").manual_seed(15) # build a generator; different seeds produce different images

image = pipeline(prompt, generator=generator).images[0] # get the generated image

image.save("image.jpg") # save the image locally; after running, you will find image.jpg in the working directory

Improved version:

from diffusers import DiffusionPipeline # import the DiffusionPipeline from the Diffusers library
import torch # PyTorch

model_id = "runwayml/stable-diffusion-v1-5" # model id; I recommend downloading the model from the Hugging Face Hub and placing it in your local project directory
pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True, torch_dtype=torch.float16) # load the pipeline
# The call above sets two extra arguments:
# 1. use_safetensors: defaults to None; set it to True to load the weights from safetensors files
# 2. torch_dtype: defaults to 32-bit precision; float16 halves the precision to speed up inference

prompt = "a cute cat, eating carrots." # text prompt
pipeline = pipeline.to("cuda") # move the pipeline to the GPU


generator = torch.Generator("cuda").manual_seed(13215) # build a generator; different seeds produce different images

image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0] # get the generated image
# num_inference_steps is new here: the number of denoising steps used during inference.

image.save("image.jpg") # save the image locally; after running, you will find image.jpg in the working directory

Using a scheduler:

from diffusers import DiffusionPipeline # import the DiffusionPipeline from the Diffusers library
from diffusers import DPMSolverMultistepScheduler # import the DPM scheduler
import torch # PyTorch

model_id = "runwayml/stable-diffusion-v1-5" # model id; I recommend downloading the model from the Hugging Face Hub and placing it in your local project directory
pipeline = DiffusionPipeline.from_pretrained(model_id, use_safetensors=True, torch_dtype=torch.float16) # load the pipeline
# The call above sets two extra arguments:
# 1. use_safetensors: defaults to None; set it to True to load the weights from safetensors files
# 2. torch_dtype: defaults to 32-bit precision; float16 halves the precision to speed up inference

prompt = "a cute cat, eating carrots." # text prompt
pipeline = pipeline.to("cuda") # move the pipeline to the GPU


generator = torch.Generator("cuda").manual_seed(13215) # build a generator; different seeds produce different images

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) # replace the pipeline's scheduler
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0] # get the generated image
# num_inference_steps is new here: the number of denoising steps used during inference.

image.save("image.jpg") # save the image locally; different schedulers produce noticeably different results

3. Training an unconditional Diffusion image-generation model

The full training code follows:

from dataclasses import dataclass

# training configuration
@dataclass
class TrainingConfig:
    image_size = 128  # the generated image resolution
    train_batch_size = 16
    eval_batch_size = 16  # how many images to sample during evaluation
    num_epochs = 50
    gradient_accumulation_steps = 1
    learning_rate = 1e-4
    lr_warmup_steps = 500
    save_image_epochs = 10
    save_model_epochs = 30
    mixed_precision = "fp16"  # `no` for float32, `fp16` for automatic mixed precision
    output_dir = "ddpm-butterflies-128"  # the model name locally and on the HF Hub

    push_to_hub = False  # whether to upload the saved model to the HF Hub
    hub_model_id = ""  # the name of the repository to create on the HF Hub
    hub_private_repo = False
    overwrite_output_dir = True  # overwrite the old model when re-running the notebook
    seed = 0


config = TrainingConfig()

from datasets import load_dataset

# Load the dataset; here we use the butterfly dataset from the official Hugging Face tutorial
config.dataset_name = "huggan/smithsonian_butterflies_subset"
dataset = load_dataset(config.dataset_name, split="train") # which split of the dataset to load

from torchvision import transforms
# image preprocessing for the dataset
preprocess = transforms.Compose(
    [
        transforms.Resize((config.image_size, config.image_size)), # resize to a uniform resolution
        transforms.RandomHorizontalFlip(), # random horizontal flip (data augmentation)
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]), # normalize pixel values to [-1, 1]
    ]
)

def transform(examples):
    images = [preprocess(image.convert("RGB")) for image in examples["image"]]
    return {"images": images}


dataset.set_transform(transform)

import torch

train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True)

from diffusers import UNet2DModel # import the model class to train; here we use the UNet2DModel provided by Diffusers
# the internal structure of this model is not covered here
model = UNet2DModel(
    sample_size=config.image_size,  # the target image resolution
    in_channels=3,  # the number of input channels, 3 for RGB images
    out_channels=3,  # the number of output channels
    layers_per_block=2,  # how many ResNet layers to use per UNet block
    block_out_channels=(128, 128, 256, 256, 512, 512),  # the number of output channels for each UNet block
    down_block_types=(
        "DownBlock2D",  # a regular ResNet downsampling block
        "DownBlock2D",
        "DownBlock2D",
        "DownBlock2D",
        "AttnDownBlock2D",  # a ResNet downsampling block with spatial self-attention
        "DownBlock2D",
    ),
    up_block_types=(
        "UpBlock2D",  # a regular ResNet upsampling block
        "AttnUpBlock2D",  # a ResNet upsampling block with spatial self-attention
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
        "UpBlock2D",
    ),
)
# print the shape of one image as a sanity check
sample_image = dataset[0]["images"].unsqueeze(0)
print("Input shape:", sample_image.shape)

print("Output shape:", model(sample_image, timestep=0).sample.shape)

import torch
from PIL import Image
from diffusers import DDPMScheduler # import the noise scheduler; here we use DDPMScheduler

noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
noise = torch.randn(sample_image.shape)
timesteps = torch.LongTensor([50])
noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps)

# convert the noisy tensor back to a PIL image; in a notebook this displays it, in a script you can call .save() on it
Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0])

import torch.nn.functional as F

noise_pred = model(noisy_image, timesteps).sample
loss = F.mse_loss(noise_pred, noise)

from diffusers.optimization import get_cosine_schedule_with_warmup

optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate)
lr_scheduler = get_cosine_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=config.lr_warmup_steps,
    num_training_steps=(len(train_dataloader) * config.num_epochs),
)

from diffusers import DDPMPipeline
from diffusers.utils import make_image_grid

def evaluate(config, epoch, pipeline):
    # Sample some images from random noise (this is the backward diffusion process).
    # The default pipeline output type is `List[PIL.Image]`
    images = pipeline(
        batch_size=config.eval_batch_size,
        generator=torch.Generator(device='cpu').manual_seed(config.seed), # Use a separate torch generator to avoid rewinding the random state of the main training loop
    ).images

    # Make a grid out of the images
    image_grid = make_image_grid(images, rows=4, cols=4)

    # Save the images
    test_dir = os.path.join(config.output_dir, "samples")
    os.makedirs(test_dir, exist_ok=True)
    image_grid.save(f"{test_dir}/{epoch:04d}.png")

from accelerate import Accelerator
from huggingface_hub import create_repo, upload_folder
from tqdm.auto import tqdm
from pathlib import Path
import os

def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler):
    # Initialize accelerator and tensorboard logging
    accelerator = Accelerator(
        mixed_precision=config.mixed_precision,
        gradient_accumulation_steps=config.gradient_accumulation_steps,
        log_with="tensorboard",
        project_dir=os.path.join(config.output_dir, "logs"),
    )
    if accelerator.is_main_process:
        if config.output_dir is not None:
            os.makedirs(config.output_dir, exist_ok=True)
        if config.push_to_hub:
            repo_id = create_repo(
                repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True
            ).repo_id
        accelerator.init_trackers("train_example")

    # Prepare everything
    # There is no specific order to remember, you just need to unpack the
    # objects in the same order you gave them to the prepare method.
    model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, lr_scheduler
    )

    global_step = 0

    # Now you train the model
    for epoch in range(config.num_epochs):
        progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process)
        progress_bar.set_description(f"Epoch {epoch}")

        for step, batch in enumerate(train_dataloader):
            clean_images = batch["images"]
            # Sample noise to add to the images
            noise = torch.randn(clean_images.shape, device=clean_images.device)
            bs = clean_images.shape[0]

            # Sample a random timestep for each image
            timesteps = torch.randint(
                0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device,
                dtype=torch.int64
            )

            # Add noise to the clean images according to the noise magnitude at each timestep
            # (this is the forward diffusion process)
            noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)

            with accelerator.accumulate(model):
                # Predict the noise residual
                noise_pred = model(noisy_images, timesteps, return_dict=False)[0]
                loss = F.mse_loss(noise_pred, noise)
                accelerator.backward(loss)

                if accelerator.sync_gradients:
                    accelerator.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            progress_bar.update(1)
            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)
            global_step += 1

        # After each epoch you optionally sample some demo images with evaluate() and save the model
        if accelerator.is_main_process:
            pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)

            if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1:
                evaluate(config, epoch, pipeline)

            if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1:
                if config.push_to_hub:
                    upload_folder(
                        repo_id=repo_id,
                        folder_path=config.output_dir,
                        commit_message=f"Epoch {epoch}",
                        ignore_patterns=["step_*", "epoch_*"],
                    )
                else:
                    pipeline.save_pretrained(config.output_dir)

from accelerate import notebook_launcher

args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)

notebook_launcher(train_loop, args, num_processes=1) # start training
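
notebook_launcher is aimed at Jupyter notebooks; in a plain Python script you can also call the loop directly and use the accelerate CLI for multi-GPU runs (a sketch; the file name train.py is just an example):

if __name__ == "__main__":
    # direct call in a .py script; start multi-GPU training with: accelerate launch train.py
    train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler)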

After training finishes, a ddpm-butterflies-128 folder appears in your project directory; it contains the saved model weights.
To generate images with the model you just trained:

from diffusers import DiffusionPipeline
import torch

# pass the model path to from_pretrained
pipeline = DiffusionPipeline.from_pretrained("ddpm-butterflies-128").to("cuda")
image = pipeline(generator=torch.Generator("cuda").manual_seed(0)).images[0]
image.save("image.png")
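
As a side note, DiffusionPipeline reads the saved model_index.json and instantiates the concrete pipeline class, which for this model is a DDPMPipeline, so loading it explicitly is equivalent (a sketch):

from diffusers import DDPMPipeline
import torch

pipeline = DDPMPipeline.from_pretrained("ddpm-butterflies-128").to("cuda")
image = pipeline(generator=torch.Generator("cuda").manual_seed(0)).images[0]
image.save("image.png")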

4. Using LoRA

If you get the following error:
ValueError: PEFT backend is required for this method.
install peft in your Python environment:
pip install peft
Example code:

from diffusers import DiffusionPipeline # import the DiffusionPipeline from the Diffusers library
import torch # PyTorch

model_id = "runwayml/stable-diffusion-v1-5" # model id; I recommend downloading the model from the Hugging Face Hub and placing it in your local project directory
pipeline = DiffusionPipeline.from_pretrained(model_id).to("cuda") # load the pipeline

prompt = "a cute cat, eating carrots." # text prompt

# The code below loads a LoRA; here it is latent-consistency/lcm-lora-sdv1-5 from Hugging Face; download it locally and adjust the path
lora_model_path = "LoRAs/latent-consistency/lcm-lora-sdv1-5" # LoRA model path
pipeline.load_lora_weights(lora_model_path, weight_name="pytorch_lora_weights.safetensors")
lora_scale = 0.3 # LoRA scale: how strongly the LoRA weights are applied

generator = torch.Generator("cuda").manual_seed(178) # build a generator; different seeds produce different images

image = pipeline(prompt, cross_attention_kwargs={"scale": lora_scale}, generator=generator, num_inference_steps=20).images[0] # get the generated image

image.save("image.jpg") # save the image locally; after running, you will find image.jpg in the working directory
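
To get back to the plain base model afterwards, the LoRA can be removed again (a one-line sketch):

pipeline.unload_lora_weights() # remove the loaded LoRA and restore the original base weights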

Combining multiple LoRAs:

from diffusers import DiffusionPipeline # import the DiffusionPipeline from the Diffusers library
import torch # PyTorch

model_id = "runwayml/stable-diffusion-v1-5" # model id; I recommend downloading the model from the Hugging Face Hub and placing it in your local project directory
pipeline = DiffusionPipeline.from_pretrained(model_id).to("cuda") # load the pipeline

prompt = "a cute cat, eating carrots." # text prompt

# The code below mixes several LoRAs, all publicly available on Hugging Face; download them locally and adjust the paths
lora_model_path1 = "LoRAs/latent-consistency/lcm-lora-sdv1-5" # path of the first LoRA
pipeline.load_lora_weights(lora_model_path1, weight_name="pytorch_lora_weights.safetensors", adapter_name="lora1")
lora_model_path2 = "LoRAs/kusnim1121/stable-diffusion-one-piece-lora"
pipeline.load_lora_weights(lora_model_path2, weight_name="pytorch_lora_weights.safetensors", adapter_name="lora2")

lora_scale = 0.5

pipeline.set_adapters(["lora1", "lora2"], adapter_weights=[0.5, 1.0])

# pipeline.disable_lora() # disable the loaded LoRA adapters

generator = torch.Generator("cuda").manual_seed(178) # build a generator; different seeds produce different images

image = pipeline(prompt, cross_attention_kwargs={"scale": lora_scale}, generator=generator, num_inference_steps=20).images[0] # get the generated image

image.save("image.jpg") # save the image locally; after running, you will find image.jpg in the working directory
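
If you settle on a fixed adapter combination, the LoRA weights can also be merged into the base model to avoid the per-step adapter overhead (a sketch; fuse_lora/unfuse_lora are part of the LoRA loading API):

pipeline.fuse_lora()   # merge the active adapters into the base weights
image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0]
pipeline.unfuse_lora() # undo the merge when you want to switch adapters again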

5. Unconditional image generation

Task: generate images without any conditioning input such as text; the output reflects whatever the model was trained on. For example, a model trained on butterfly images will generate random butterfly images.

from diffusers import DiffusionPipeline

# load the model through the DiffusionPipeline
generator = DiffusionPipeline.from_pretrained("ddpm-butterflies-128").to("cuda")
# generate directly and save the result
image = generator(num_inference_steps=100).images[0]
image.save("image.png")

6. Text-to-image

Task: generate an image that matches a text prompt.

import torch
from diffusers import AutoPipelineForText2Image

# load the model through the AutoPipelineForText2Image pipeline
pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")

# set the text prompt, then generate
prompt = "a cute dog."
# image = pipeline(prompt=prompt, num_inference_steps=20).images[0]

# set the text prompt and a random seed; passing the same seed later reproduces the result
prompt = "a cute dog."
generator = torch.Generator("cuda").manual_seed(1)
# image = pipeline(prompt=prompt, num_inference_steps=20, generator=generator).images[0]

# set the text prompt and a random seed, as above
# and additionally set the size of the generated image
prompt = "a cute dog."
generator = torch.Generator("cuda").manual_seed(1)
# image = pipeline(prompt=prompt, num_inference_steps=20, generator=generator,
#                  height=768, width=512).images[0]

# guidance_scale: higher values tie the image more closely to the text; lower values give the model more creative freedom
prompt = "a cute dog."
generator = torch.Generator("cuda").manual_seed(1)
# image = pipeline(prompt=prompt, num_inference_steps=20, generator=generator,
#                  guidance_scale=3.5).images[0]

# negative prompt: while the positive prompt steers the model toward related content, the negative prompt steers generation away from the content it describes
positive_prompt = "a cute dog."
negative_prompt = "black, ugly."
generator = torch.Generator("cuda").manual_seed(1)
image = pipeline(prompt=positive_prompt, negative_prompt = negative_prompt, num_inference_steps=20, generator=generator,
                 guidance_scale=3.5).images[0]
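
A single pipeline call can also produce several variations at once via num_images_per_prompt (a sketch; make_image_grid is the same utility used in the inpainting section below):

from diffusers.utils import make_image_grid

# generate 4 variations of the same prompt in one call and tile them into a 2x2 grid
images = pipeline(prompt=positive_prompt, negative_prompt=negative_prompt, num_inference_steps=20,
                  generator=torch.Generator("cuda").manual_seed(1),
                  num_images_per_prompt=4).images
make_image_grid(images, rows=2, cols=2).save("grid.jpg")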

7. Image-to-image

Task: like text-to-image, but with an additional image as an input condition.

import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image
# path to the image-to-image model
model_path = "runwayml/stable-diffusion-v1-5"
# load the model through the AutoPipelineForImage2Image pipeline
pipeline = AutoPipelineForImage2Image.from_pretrained(model_path, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("cuda")
# text prompts
positive_prompt = "a cute dog."
negative_prompt = "black, ugly."
# input image
input_image = load_image("test.jpg")
# seeded generator for reproducible results
generator = torch.Generator("cuda").manual_seed(1)
# strength controls how much the input image is altered: higher values add more noise and deviate further from the input
strength1 = 0.8
strength2 = 0.5
# generate the images
image1 = pipeline(prompt=positive_prompt, negative_prompt = negative_prompt, num_inference_steps=20, generator=generator,
                 guidance_scale=3.5,
                 image=input_image, strength=strength1).images[0]
image2 = pipeline(prompt=positive_prompt, negative_prompt = negative_prompt, num_inference_steps=20, generator=generator,
                 guidance_scale=3.5,
                 image=input_image, strength=strength2).images[0]
# save
image1.save("image1.jpg")
image2.save("image2.jpg")
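
To compare the two strength settings side by side, the grid utility from diffusers.utils can be used (a sketch; both outputs have the same size, so they fit into one 1x2 grid):

from diffusers.utils import make_image_grid

make_image_grid([image1, image2], rows=1, cols=2).save("strength_compare.jpg")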

8. Inpainting

Task: replace or edit a specified region of an image. Common applications: removing defects or artifacts from a picture, or replacing a region of the image.

import torch
from diffusers import AutoPipelineForInpainting
# model path
model_path = "runwayml/stable-diffusion-v1-5"
# load the model through the AutoPipelineForInpainting pipeline
pipeline = AutoPipelineForInpainting.from_pretrained(pretrained_model_or_path = model_path,
                                                     torch_dtype = torch.float16,
                                                     use_safetensors = True).to("cuda")
pipeline.enable_model_cpu_offload() # model CPU offload: lowers GPU memory usage, at a small cost in inference speed

from diffusers.utils import load_image, make_image_grid # import the image-loading and grid utilities from diffusers.utils

init_image = load_image("inpaint.png") # input image
mask_image = load_image("inpaint_mask.png") # image mask, usually a binary image at the same resolution as the input; the white region marks what the model regenerates

prompt = "a black cat with glowing eyes, cute, adorable, disney, pixar, highly detailed, 8k"
negative_prompt = "bad anatomy, deformed, ugly, disfigured"
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image, generator=generator).images[0]
# this saves a 1-row, 3-column grid image in the project directory
make_image_grid([init_image, mask_image, image], rows=1, cols=3).save("grid.png")

Mask blurring: applying the raw mask can make the generated region transition abruptly into the surrounding pixels, so a blurred mask can be used instead:

import torch
from diffusers import AutoPipelineForInpainting
# model path
model_path = "runwayml/stable-diffusion-v1-5"
# load the model through the AutoPipelineForInpainting pipeline
pipeline = AutoPipelineForInpainting.from_pretrained(pretrained_model_or_path = model_path,
                                                     torch_dtype = torch.float16,
                                                     use_safetensors = True).to("cuda")
pipeline.enable_model_cpu_offload() # model CPU offload: lowers GPU memory usage, at a small cost in inference speed

from diffusers.utils import load_image, make_image_grid # import the image-loading and grid utilities from diffusers.utils

init_image = load_image("inpaint.png") # input image
mask_image = load_image("inpaint_mask.png") # image mask, usually a binary image at the same resolution as the input; the white region marks what the model regenerates
# create a blurred mask, generally used to smooth the pixel transition
mask_image = pipeline.mask_processor.blur(mask_image, blur_factor=33)

prompt = "a black cat with glowing eyes, cute, adorable, disney, pixar, highly detailed, 8k"
negative_prompt = "bad anatomy, deformed, ugly, disfigured"
generator = torch.Generator("cuda").manual_seed(0)
image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image, generator=generator).images[0]
# this saves a 1-row, 3-column grid image in the project directory
make_image_grid([init_image, mask_image, image], rows=1, cols=3).save("grid.png")

9. Text-guided depth-to-image generation

Task: pass a text prompt and an initial image; the model automatically extracts depth information from the initial image to condition the generation.

import torch
from diffusers import StableDiffusionDepth2ImgPipeline # import the pipeline
from diffusers.utils import load_image, make_image_grid # import the utilities

model_id = "stabilityai/stable-diffusion-2-depth"
# build the StableDiffusionDepth2ImgPipeline
pipeline = StableDiffusionDepth2ImgPipeline.from_pretrained(model_id,
                                            torch_dtype=torch.float16,
                                            use_safetensors=True).to("cuda")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
init_image = load_image(url)
prompt = "two tigers"
negative_prompt = "bad, deformed, ugly, bad anatomy"
image = pipeline(prompt=prompt, image=init_image, negative_prompt=negative_prompt, strength=0.7).images[0]
make_image_grid([init_image, image], rows=1, cols=2).save("grid.png") # save a 1-row, 2-column comparison grid