Deploying Show-o: the latest unified transformer architecture for multimodal understanding and generation

Show-o is a multimodal large model jointly developed by ByteDance and the Show Lab at the National University of Singapore. It unifies multimodal understanding and generation in a single model.

Show-o's key innovation is combining autoregressive and discrete diffusion modeling, so it can handle inputs and outputs across diverse and mixed modalities.

The architecture builds on a pretrained large language model (LLM) and uses discrete denoising diffusion to model discrete image tokens, removing the need for an extra text encoder.

Show-o adopts a unified prompting strategy: images and text are tokenized into a single input sequence, and an omni-attention mechanism processes the different signal types, combining causal attention (for text) with full attention (for image tokens) and mixing the two adaptively.
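
To make the omni-attention idea concrete, here is a minimal illustrative sketch, not Show-o's actual implementation (the function name and sequence layout are made up for the example): an attention mask that is causal over text positions but fully bidirectional within the image-token span.

import torch

def omni_attention_mask(seq_len, img_start, img_end):
    # Causal (lower-triangular) attention for text positions, full bidirectional
    # attention among the image-token positions in [img_start, img_end)
    mask = torch.tril(torch.ones(seq_len, seq_len)).bool()
    mask[img_start:img_end, img_start:img_end] = True
    return mask

# Example: 4 text tokens, then 3 image tokens, then 2 trailing text tokens
print(omni_attention_mask(9, 4, 7).int())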

Compared with purely autoregressive image generation, Show-o needs roughly 20x fewer sampling steps, which significantly cuts compute consumption and makes the model more flexible to deploy.

Show-o also natively supports a range of downstream applications, such as text-guided image inpainting and extrapolation, without any fine-tuning, further demonstrating its potential as a next-generation foundation model.

GitHub repository: https://github.com/showlab/Show-o

I. Environment setup

1. Python environment

Python 3.10 or later is recommended.
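
A quick interpreter check before installing anything (just a convenience snippet, not part of the Show-o repo):

import sys

# The steps below assume Python 3.10+, so fail fast on older interpreters
assert sys.version_info >= (3, 10), f"Python 3.10+ recommended, found {sys.version.split()[0]}"
print("Python", sys.version.split()[0])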

2. Installing pip dependencies

pip install torch==2.2.1+cu118 torchvision==0.17.1+cu118 torchaudio==2.2.1 --extra-index-url https://download.pytorch.org/whl/cu118

pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
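
After installation, it is worth confirming that the CUDA builds were actually picked up (a simple check, independent of Show-o itself):

import torch
import torchvision

# Expect 2.2.1+cu118 / 0.17.1+cu118 and at least one visible GPU if the CUDA wheels installed correctly
print("torch:", torch.__version__, "| torchvision:", torchvision.__version__)
print("CUDA available:", torch.cuda.is_available(), "| device count:", torch.cuda.device_count())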

3. Downloading the show-o model

git lfs install

git clone https://huggingface.co/showlab/show-o

4. Downloading the show-o-w-clip-vit model

git lfs install

git clone https://huggingface.co/showlab/show-o-w-clip-vit

5. Downloading the magvitv2 model

git lfs install

git clone https://huggingface.co/showlab/magvitv2
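
With all three checkpoints cloned, a quick sanity check helps catch a common pitfall: if git-lfs was not set up, the clones contain tiny pointer files instead of real weights. This is just a local disk-size check, not something from the repo:

import os

# The three local clone directories created by the git commands above
for repo in ("show-o", "show-o-w-clip-vit", "magvitv2"):
    if not os.path.isdir(repo):
        print(f"{repo}: missing")
        continue
    size_mb = sum(
        os.path.getsize(os.path.join(root, name))
        for root, _, files in os.walk(repo)
        for name in files
    ) / 1e6
    print(f"{repo}: about {size_mb:.0f} MB on disk (a few KB means git-lfs did not pull the weights)")

The inference code later in this post reads these checkpoints through config keys such as model.showo.pretrained_model_path and model.vq_model.vq_model_name, so point those entries at the local clone paths.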

II. Functional testing

1. Running the test

(1) Python code for image understanding
 

import os
from PIL import Image
from tqdm import tqdm
import numpy as np
import torch
import wandb
from models import Showo, MAGVITv2
from prompting_utils import UniversalPrompting, create_attention_mask_for_mmu, create_attention_mask_for_mmu_vit
from utils import get_config, flatten_omega_conf, image_transform
from transformers import AutoTokenizer
from models.clip_encoder import CLIPVisionTower
from transformers import CLIPImageProcessor
import conversation as conversation_lib

# Set environment variables
os.environ["TOKENIZERS_PARALLELISM"] = "true"

# Set up conversation template
conversation_lib.default_conversation = conversation_lib.conv_templates["phi1.5"]
SYSTEM_PROMPT = ("A chat between a curious user and an artificial intelligence assistant. "
                 "The assistant gives helpful, detailed, and polite answers to the user's questions.")
SYSTEM_PROMPT_LEN = 28


def get_vq_model_class(model_type):
    if model_type == "magvitv2":
        return MAGVITv2
    else:
        raise ValueError(f"model_type {model_type} not supported.")


def initialize_wandb(config):
    resume_wandb_run = config.wandb.resume
    run_id = config.wandb.get("run_id", None)
    if run_id is None:
        resume_wandb_run = False
        run_id = wandb.util.generate_id()
        config.wandb.run_id = run_id

    wandb_config = {k: v for k, v in flatten_omega_conf(config, resolve=True)}

    wandb.init(
        project="demo",
        name=config.experiment.name + '_mmu',
        config=wandb_config,
    )
    return run_id


def prepare_model_and_tokenizer(config, device):
    tokenizer = AutoTokenizer.from_pretrained(config.model.showo.llm_model_path, padding_side="left")
    uni_prompting = UniversalPrompting(tokenizer, max_text_len=config.dataset.preprocessing.max_seq_length,
                                       special_tokens=("<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>", "<|t2v|>", "<|v2v|>", "<|lvg|>"),
                                       ignore_id=-100, cond_dropout_prob=config.training.cond_dropout_prob)

    vq_model = get_vq_model_class(config.model.vq_model.type)
    vq_model = vq_model.from_pretrained(config.model.vq_model.vq_model_name).to(device).eval()

    vision_tower = CLIPVisionTower("openai/clip-vit-large-patch14-336").to(device)
    clip_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14-336")

    model = Showo.from_pretrained(config.model.showo.pretrained_model_path).to(device).eval()

    return tokenizer, uni_prompting, vq_model, vision_tower, clip_processor, model


def process_image(image_path, config, device, clip_processor):
    # Prepare two views of the image: a resized tensor for the MAGVIT-v2 tokenizer and
    # CLIP pixel values for the vision tower (preprocess must be called on the
    # CLIPImageProcessor instance, not on the class)
    image_ori = Image.open(image_path).convert("RGB")
    image = image_transform(image_ori, resolution=config.dataset.params.resolution).to(device).unsqueeze(0)
    pixel_values = clip_processor.preprocess(image_ori, return_tensors="pt")["pixel_values"]
    return image, pixel_values.squeeze(0)


def generate_response(model, tokenizer, uni_prompting, vision_tower, pixel_values, image_tokens, question, device, configs):
    batch_size = 1
    
    # show-o-w-clip-vit checkpoint: the image enters as continuous CLIP-ViT embeddings
    if configs.model.showo.w_clip_vit:
        conv = conversation_lib.default_conversation.copy()
        conv.append_message(conv.roles[0], question)
        conv.append_message(conv.roles[1], None)
        
        prompt_question = conv.get_prompt().strip()
        input_ids_system = tokenizer(SYSTEM_PROMPT, return_tensors="pt", padding="longest").input_ids
  
        assert input_ids_system.shape[-1] == 28
        input_ids_system = input_ids_system.to(device)
        
        # Keep the batch dimension so the concatenation with the special-token tensors below works
        input_ids = tokenizer(prompt_question, return_tensors="pt", padding="longest").input_ids.to(device)
        
        input_ids_combined = torch.cat([
            (torch.ones(1, 1) * uni_prompting.sptids_dict['<|mmu|>']).to(device),
            input_ids_system,
            (torch.ones(1, 1) * uni_prompting.sptids_dict['<|soi|>']).to(device),
            (torch.ones(1, 1) * uni_prompting.sptids_dict['<|eoi|>']).to(device),
            input_ids
        ], dim=1).long()

        # Encode the image with the CLIP vision tower and project it into the LLM embedding space
        # (keep the batch dimension so it can be concatenated with text_embeddings below)
        image_embeddings = vision_tower(pixel_values[None].to(device))
        image_embeddings = model.mm_projector(image_embeddings)

        text_embeddings = model.showo.model.embed_tokens(input_ids_combined)

        input_embeddings = torch.cat([
            text_embeddings[:, :2 + SYSTEM_PROMPT_LEN, :],
            image_embeddings,
            text_embeddings[:, 2 + SYSTEM_PROMPT_LEN:, :]
        ], dim=1)

        attention_mask = create_attention_mask_for_mmu_vit(input_embeddings, system_prompt_len=SYSTEM_PROMPT_LEN)[0].unsqueeze(0)

        cont_toks_list = model.mmu_generate(input_embeddings=input_embeddings,
                                            attention_mask=attention_mask,
                                            max_new_tokens=100,
                                            top_k=1,
                                            eot_token=tokenizer.eos_token_id)
    else:
        # Plain show-o checkpoint: the image enters as discrete MAGVIT-v2 tokens in the input id sequence
        input_ids = tokenizer('USER: \n' + question + ' ASSISTANT:', return_tensors='pt')['input_ids'].to(device)
        input_ids = torch.cat([
            (torch.ones(1, 1) * uni_prompting.sptids_dict['<|mmu|>']).to(device),
            (torch.ones(1, 1) * uni_prompting.sptids_dict['<|soi|>']).to(device),
            image_tokens,
            (torch.ones(1, 1) * uni_prompting.sptids_dict['<|eoi|>']).to(device),
            (torch.ones(1, 1) * uni_prompting.sptids_dict['<|sot|>']).to(device),
            input_ids
        ], dim=1).long()

        attention_mask = create_attention_mask_for_mmu(input_ids, int(uni_prompting.sptids_dict['<|eoi|>']))

        cont_toks_list = model.mmu_generate(input_ids=input_ids,
                                            attention_mask=attention_mask,
                                            max_new_tokens=100,
                                            top_k=1,
                                            eot_token=uni_prompting.sptids_dict['<|eot|>'])

    cont_toks_list = torch.stack(cont_toks_list).squeeze()[None]
    text_response = tokenizer.batch_decode(cont_toks_list, skip_special_tokens=True)[0]
    return text_response


if __name__ == '__main__':
    config = get_config()
    run_id = initialize_wandb(config)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    tokenizer, uni_prompting, vq_model, vision_tower, clip_processor, model = prepare_model_and_tokenizer(config, device)

    file_list = os.listdir(config.mmu_image_root)
    responses = ['' for _ in range(len(file_list))]
    images = []

    for i, file_name in enumerate(tqdm(file_list)):
        image_path = os.path.join(config.mmu_image_root, file_name)
        image, pixel_values = process_image(image_path, config, device, clip_processor)
        images.append(image)
        # Offset the MAGVIT-v2 codes past the text vocabulary so they index Show-o's unified token space
        image_tokens = vq_model.get_code(image) + len(uni_prompting.text_tokenizer)

        for question in config.question.split(' *** '):
            text_response = generate_response(model, tokenizer, uni_prompting, vision_tower, pixel_values, image_tokens, question, device, config)
            responses[i] += f'User: {question}\nAnswer: {text_response}\n'

    images = torch.cat(images, dim=0)
    images = torch.clamp((images + 1.0) / 2.0, 0.0, 1.0) * 255.0
    images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
    pil_images = [Image.fromarray(image) for image in images]

    wandb_images = [wandb.Image(image, caption=responses[i]) for i, image in enumerate(pil_images)]
    wandb.log({"multimodal understanding": wandb_images}, step=0)
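
To run the script, get_config() typically takes OmegaConf-style key=value overrides from the command line. A plausible invocation looks like the following, assuming the code above is saved as inference_mmu.py; the config file name and image directory are assumptions, so adjust them to whatever exists in your local configs/ directory:

python3 inference_mmu.py config=configs/showo_demo_w_clip_vit.yaml mmu_image_root=./mmu_validation question='Please describe this image in detail. *** Do you think the image is unusual or not?'

Multiple questions are separated by ' *** ', matching the config.question.split(' *** ') call in the main loop.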

To be continued...

For more details, follow: 杰哥新技术
