Simple Weighted Merging of ppdiffusers Models

This article shows how to merge deep learning models on AI Studio, specifically image generation models. Using weighted merging (weighted_sum and add_difference) on the parameters of the anything-v3, basil_mix, and pastelmix models, it adjusts the merge rate to produce different generation results, then saves the merged model for later use.

Model merging

  • We test merging anything-v3, basil_mix, and pastelmix; the merge simply combines the models' network parameters as a weighted sum

  • There are two weighting schemes (a short numeric sketch follows this list):

    1. weighted_sum: takes two sets of model parameters, A (the base) and B. The result is A·(1-rate) + B·rate

    2. add_difference: generally takes three sets of model parameters: A, B, and C. The result is A + (B-C)·rate

    When C is taken to be A, A + (B-A)·rate = A·(1-rate) + B·rate, so the two schemes coincide
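
A minimal numeric sketch of the two rules (plain NumPy, not part of the original project) makes the equivalence concrete:

import numpy as np

A = np.array([1.0, 2.0])   # base model parameters
B = np.array([3.0, 4.0])   # second model parameters
rate = 0.3

weighted_sum = A * (1 - rate) + B * rate      # 0.7*A + 0.3*B -> [1.6, 2.6]
add_diff_c_eq_a = A + (B - A) * rate          # add_difference with C = A -> [1.6, 2.6]

print(np.allclose(weighted_sum, add_diff_c_eq_a))   # True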

Setup

Run this once; it does not need to be run again afterwards.

import os
os.system("pip install -U ppdiffusers --user")
# Note: restart the runtime after installing
print('Note: restart the runtime after installing')
# Unpack the two model archives mounted as datasets
!unzip /home/aistudio/data/data189622/basil_mix.zip
!unzip /home/aistudio/data/data188876/Pastelmix.zip
!mkdir -p outputs/test
# Download the base model and save a local copy
from ppdiffusers import *
model_name = "Linaqruf/anything-v3.0"
pipe = StableDiffusionPipelineAllinOne.from_pretrained(model_name, safety_checker=None, feature_extractor=None, requires_safety_checker=False)
pipe.save_pretrained(model_name)
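
For reference, the folder written by save_pretrained should follow the usual component layout (the listing below is an assumption; with safety_checker=None the safety checker is not written). The merge code later only needs each model's unet subfolder:

import os
# Hypothetical sanity check: list the components that save_pretrained wrote out,
# e.g. model_index.json plus scheduler/, text_encoder/, tokenizer/, unet/, vae/
print(sorted(os.listdir("Linaqruf/anything-v3.0")))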

Implementation

  • Use anything-v3 as the base and blend in the basil_mix model

  • Extract each model's unet parameters and merge them

Merge function parameters

The merge function takes the unet state dicts as its inputs:

def module_mix(base_dict, second_dict, third_dict=None, rate=0.5, mix_bias=False, mode='weighted_sum', back_need=True)

  • mix_bias: whether bias terms are merged as well

  • mode:
    1. weighted_sum: takes two sets of model parameters, A (the base) and B. The result is A·(1-rate) + B·rate

    2. add_difference: generally takes three sets of model parameters: A, B, and C. The result is A + (B-C)·rate

  • back_need: roll the just-merged parameters currently held in memory back to their pre-merge state before applying a new rate

rate_last records whether a merge has already been applied (and at which rate), so the cells can be run multiple times.

from PIL import Image
import random
import time
from matplotlib import pyplot as plt
from ppdiffusers import *
def get_image_list(pipe,rate_list,base_dict,second_dict,third_dict,mode):
    prompt = 'masterpiece,(best quality),absurdres,highres,(facial),1girl,pov,seductive smile,blunt bangs,beautiful lovely sweet cute girl lovely sweet loli moe face with blush cheeks,extremely detailed face,symmetric highly detailed eyes, fantastic eyes, intricate eyes,long eyelashes, cinematic lighting sharp focus bokeh'
    negative_prompt = 'nsfw,(worst quality, low quality:1.4),monochrome,blur,bad fingers'
    seed = 1145141919
    num_images = len(rate_list)
    width,height = 512,512
    # width,height = 256,256
    pipe.scheduler =  schedulers.DPMSolverMultistepScheduler.from_config(
                pipe.scheduler.config,
                thresholding=False,
                algorithm_type="dpmsolver++",
                solver_type="midpoint",
                lower_order_final=True,
            )
    # pipe.scheduler =  schedulers.DDIMScheduler.from_config(pipe.scheduler.config)
    guidance_scale = 11
    num_inference_steps= 40 
    img_path_list = []
    container_list = []
    for i in range(num_images):
        base_dict = module_mix(base_dict,second_dict,third_dict,rate = rate_list[i],back_need=True,mode=mode)
        pipe.unet.set_dict(base_dict)
        
        cur_seed = random.randint(0, 2**32) if seed == -1 else seed
        # save_path = "outputs/test/out_rate"+str(rate_list[i])+'_'+str(cur_seed)+'_'+time.strftime('%Y-%m-%d_%H-%M-%S')+".png"
        # print('seed:',cur_seed,'save_path: ',save_path)
        # img_path_list.append(save_path)
        container = pipe.text2img(
            pipe=pipe,
            prompt=prompt,
            negative_prompt=negative_prompt,
            width = width,
            height = height,
            guidance_scale=guidance_scale, 
            num_inference_steps=num_inference_steps, 
            max_embeddings_multiples=5,
            seed=cur_seed).images[0]
        container_list.append(container)
        # container.save(save_path)
    # image_box_show(rate_list,container_list,name=mode)
    return container_list
    
def image_box_show(container_list,title_list,name='weighted_sum'):
    plt.figure(figsize=(16,9))
    for i in range(len(title_list)):
        img = container_list[i]
        plt.subplot(2,5,i+1)
        plt.imshow(img)
        plt.title(title_list[i])
        plt.xticks([])
        plt.yticks([])
    plt.tight_layout()
    images_pic = plt.gcf()
    images_save = 'outputs/'+name+'_from_'+str(title_list[0])+'_to_'+str(title_list[-1])+'_.png'
    images_pic.savefig( images_save)
    print(images_save)
    plt.show()

rate_last = -1
def module_mix(base_dict,second_dict,third_dict=None,rate=0.5,mix_bias=False,mode = 'weighted_sum',back_need=True):
    global rate_last
    
    rate = float(rate)
    if(third_dict is None):
        third_dict = base_dict
    if((rate ==0 or rate ==1 ) and back_need == True):
        print('error rate value to rollback')
        return base_dict
    if(rate_last != rate):
        if(rate_last !=-1 and back_need == True):
            print('now rate =',rate,'rate_last =',rate_last,'. rate changed,try to rollback')
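            # undo the previous merge in place by inverting the mix formula with rate_last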
            
            for name in base_dict:
                if(name.find('bias') != -1 and mix_bias == False):
                    continue
                if(mode == 'weighted_sum' ):
                    base_dict[name] = (base_dict[name] -  rate_last* second_dict[name])/(1. - rate_last)
                if(mode == 'add_difference' ):
                    base_dict[name] = base_dict[name]- (second_dict[name]- third_dict[name]) * rate_last

            print('module rollback finish')
    else:
        print('now rate = rate_last =',rate_last,',rate no change made')
        return base_dict
    
    rate_last = rate
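    # apply the merge in place: weighted_sum -> (1-rate)*A + rate*B ; add_difference -> A + (B-C)*rate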
    for name in base_dict:
        
        if(name.find('bias') != -1 and mix_bias == False):
            continue
        if(mode == 'weighted_sum' ):
            base_dict[name] = (1. - rate)  * base_dict[name] + rate* second_dict[name]
        if(mode == 'add_difference' ):
            base_dict[name] = base_dict[name] + (second_dict[name]- third_dict[name]) * rate
    
    print('module mix finish, rate = ',rate)
    return  base_dict
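
To make the rate_last / back_need bookkeeping concrete, here is a small sketch with hypothetical toy parameter dicts (not from the original notebook). Calling module_mix again with a new rate first rolls the dict back to its pre-merge values, then re-mixes:

import paddle

# Toy (hypothetical) dicts standing in for two unet state dicts
a = {'conv.weight': paddle.to_tensor([1.0, 2.0]), 'conv.bias': paddle.to_tensor([0.1])}
b = {'conv.weight': paddle.to_tensor([3.0, 4.0]), 'conv.bias': paddle.to_tensor([0.2])}

rate_last = -1                      # reset the module-level flag before a fresh run
a = module_mix(a, b, rate=0.3)      # 0.7*A + 0.3*B -> weight becomes [1.6, 2.6]
a = module_mix(a, b, rate=0.5)      # rolls back to [1.0, 2.0] first, then mixes -> [2.0, 3.0]
print(a['conv.weight'].numpy())     # [2. 3.]
print(a['conv.bias'].numpy())       # [0.1] -- untouched because mix_bias defaults to False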

from ppdiffusers import *
# Model folder paths
base_model_path = "/home/aistudio/Linaqruf/anything-v3.0"
second_model_path = '/home/aistudio/basil_mix'
# third_model_path = second_model_path
third_model_path = '/home/aistudio/Pastelmix'

# Paths to the unet folders of the models to be merged
second_module_path = second_model_path + '/unet'
third_module_path = third_model_path + '/unet'

Observing the effect of different merge rates

Method A: weighted_sum, merging anything-v3 and basil_mix

The parameter dicts are shallow copies; to switch merge modes, reload the base model to restore its original parameters.
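
As a possible alternative to reloading from disk (a sketch only, not what the original cells do), one could snapshot the base unet weights right after extracting base_dict in the next cell and restore them in memory later:

# Hypothetical helper: keep a detached copy of the base unet weights so they can be
# restored without reloading the whole pipeline from disk
base_backup = {name: tensor.clone() for name, tensor in pipe.unet.state_dict().items()}

# ...after experimenting with module_mix, restore the pristine weights:
pipe.unet.set_dict({name: tensor.clone() for name, tensor in base_backup.items()})
base_dict = pipe.unet.state_dict()
rate_last = -1   # reset the bookkeeping flag as well

This keeps a full extra copy of the unet in memory, so on small-memory machines reloading the model as the cells below do may be preferable.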

# **The parameter dicts are shallow copies; to switch merge modes, reload the base model to restore its parameters**
# rate_last records the last applied rate, so this cell can be re-run
rate_last = -1
# Load the base model and extract its unet parameters
pipe = StableDiffusionPipelineAllinOne.from_pretrained(base_model_path, safety_checker=None, feature_extractor=None, requires_safety_checker=False)
base_dict = pipe.unet.state_dict()
# Extract the parameters of the model to merge in
second_module = UNet2DConditionModel.from_pretrained(second_module_path)
second_dict = second_module.state_dict()
# third_module = UNet2DConditionModel.from_pretrained(third_module_path)
# third_dict = third_module.state_dict()
rate_list=[0.+i*0.1 for i in range(10)]

container_list = get_image_list(pipe,rate_list,base_dict,second_dict,third_dict=None,mode='weighted_sum')
title_list = ['rate='+str(i) for i in rate_list]

image_box_show(container_list,title_list,name='weighted_sum')
outputs/weighted_sum_from_rate=0.0_to_rate=0.9_.png

(Image: grid of generated samples for weighted_sum at rate = 0.0 … 0.9)

Method B: add_difference, merging anything-v3, basil_mix, and pastelmix

The parameter dicts are shallow copies; to switch merge modes, reload the base model to restore its original parameters.

# **The parameter dicts are shallow copies; to switch merge modes, reload the base model to restore its parameters**
# rate_last records the last applied rate, so this cell can be re-run
rate_last = -1
# Load the base model and extract its unet parameters
pipe = StableDiffusionPipelineAllinOne.from_pretrained(base_model_path, safety_checker=None, feature_extractor=None, requires_safety_checker=False)
base_dict = pipe.unet.state_dict()
# Extract the parameters of the models to merge in
second_module = UNet2DConditionModel.from_pretrained(second_module_path)
second_dict =  second_module.state_dict()
third_module = UNet2DConditionModel.from_pretrained(third_module_path)
third_dict =  third_module.state_dict()
rate_list=[0.+i*0.1 for i in range(10)]
container_list = get_image_list(pipe,rate_list,base_dict,second_dict,third_dict,mode = 'add_difference')
title_list = ['rate='+str(i) for i in rate_list]
image_box_show(container_list,title_list,name='add_difference')
outputs/add_difference_from_rate=0.0_to_rate=0.9_.png

(Image: grid of generated samples for add_difference at rate = 0.0 … 0.9)

Pick a suitable merge rate from the observations and save the model

Take the add_difference merge at rate=0.3 and save it to the mix_model folder.

# Take the add_difference merge at rate=0.3 and save it to the mix_model folder
base_dict = module_mix(base_dict,second_dict,third_dict,rate=0.3,mode = 'add_difference')

pipe.unet.set_dict(base_dict)
pipe.save_pretrained('mix_model')
now rate = 0.3 rate_last = 0.9 . rate changed,try to rollback
module rollback finish
module mix finish, rate =  0.3


[2023-02-10 12:34:10,584] [    INFO] - Configuration saved in mix_model/text_encoder/config.json
[2023-02-10 12:34:11,851] [    INFO] - tokenizer config file saved in mix_model/tokenizer/tokenizer_config.json
[2023-02-10 12:34:11,855] [    INFO] - Special tokens file saved in mix_model/tokenizer/special_tokens_map.json

Load the saved model for text-to-image generation

# Load the saved merged model
import random
import time
from IPython.display import display
from ppdiffusers import *
model_name = "/home/aistudio/mix_model"
pipe = StableDiffusionPipelineAllinOne.from_pretrained(model_name, safety_checker=None, feature_extractor=None, requires_safety_checker=False)
prompt = 'masterpiece,(best quality),absurdres,highres,(facial),1girl,pov,seductive smile,blunt bangs,beautiful lovely sweet cute girl lovely sweet loli moe face with blush cheeks,\
    extremely detailed face,symmetric highly detailed eyes, fantastic eyes, intricate eyes,long eyelashes, cinematic lighting sharp focus bokeh'
negative_prompt = 'nsfw,(worst quality, low quality:1.4),monochrome,blur,bad fingers'
# seed = -1
seed = 114514
num_images = 1
width,height = 512,512
# width,height = 256,256
pipe.scheduler =  schedulers.DPMSolverMultistepScheduler.from_config(
            pipe.scheduler.config,
            thresholding=False,
            algorithm_type="dpmsolver++",
            solver_type="midpoint",
            lower_order_final=True,
        )
# pipe.scheduler =  schedulers.DDIMScheduler.from_config(pipe.scheduler.config)
guidance_scale = 11
num_inference_steps= 40 
img_path_list = []
container_list = []
for i in range(num_images):
    cur_seed = random.randint(0, 2**32) if seed == -1 else seed
    save_path = "outputs/test/out_"+str(cur_seed)+'_'+time.strftime('%Y-%m-%d_%H-%M-%S')+".png"
    print('seed:',cur_seed,'save_path: ',save_path)
    img_path_list.append(save_path)
    container = pipe.text2img(
        pipe=pipe,
        prompt=prompt,
        negative_prompt=negative_prompt,
        width = width,
        height = height,
        guidance_scale=guidance_scale, 
        num_inference_steps=num_inference_steps, 
        max_embeddings_multiples=5,
        seed=cur_seed).images[0]
    container_list.append(container)
    container.save(save_path)
display(container)
seed: 114514 save_path:  outputs/test/out_114514_2023-02-10_12-35-39.png

(Image: sample generated by the merged model)


Pack the model for download to your local machine

# !zip -r0 mix_model.tar mix_model
!tar -cvf mix_model.tar mix_model

Summary

Model merging has an element of randomness, but it can effectively combine the characteristics of multiple models: some of each model's individual character is traded away so that their strengths complement one another.
