Study notes: generating video with zeroscope_v2_576w

Contents

Video generation code:

Dimension error:

Fix: modify the code:

Open sourced:

The Zeroscope video generation model is open source, free, and watermark-free

The Zeroscope_v2_576w video generation model is open source - Tencent Cloud Developer Community

Video generation code:

import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
import os

# Optional proxy settings
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'
# os.environ['HTTPS_PROXY'] = 'https://127.0.0.1:7890'

os.environ["HF_TOKEN"] = "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"

# Load from a local download instead of the Hub if desired:
# pipe = DiffusionPipeline.from_pretrained(r"D:\360安全浏览器下载", torch_dtype=torch.float16)

# Load the zeroscope_v2_576w text-to-video pipeline in fp16
pipe = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w",
    torch_dtype=torch.float16,
    use_auth_token=os.environ["HF_TOKEN"],
)
# Use the DPM-Solver++ multistep scheduler, and offload idle model parts to the CPU to save VRAM
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

prompt = "Darth Vader is surfing on waves"
# 24 frames at 576x320, 40 denoising steps
video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
video_path = export_to_video(video_frames)
print(video_path)
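export_to_video writes to a temporary .mp4 when no path is given (that is the path printed above). If you prefer a fixed location and frame rate, both can be passed explicitly; a small sketch with an illustrative file name, using the output_video_path and fps parameters from the export_to_video signature shown further down:

# Write to an explicit path at 10 fps instead of a temporary file (the file name is just an example)
video_path = export_to_video(video_frames, output_video_path="zeroscope_output.mp4", fps=10)
print(video_path)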

Dimension error:

Running the script above fails inside export_to_video:

Traceback (most recent call last):
  File "E:\project\jijia\aaa.py", line 18, in <module>
    video_path = export_to_video(video_frames)
  File "D:\ProgramData\miniconda3\envs\pysd\lib\site-packages\diffusers\utils\export_utils.py", line 135, in export_to_video
    h, w, c = video_frames[0].shape
ValueError: too many values to unpack (expected 3)
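The unpacking fails because video_frames[0] has four dimensions rather than the expected (height, width, channels): the pipeline output keeps an extra leading axis. One option is to drop that axis in the calling script instead of patching diffusers; a minimal sketch, assuming a diffusers version where .frames carries a leading batch dimension:

# Workaround in the calling script: index away the leading batch axis before exporting.
# Assumes pipe(...).frames is shaped (batch, num_frames, height, width, channels).
video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0]
video_path = export_to_video(video_frames)
print(video_path)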

Fix: modify the code:

Patch export_to_video in diffusers\utils\export_utils.py (the file shown in the traceback) so it can handle frames that carry an extra leading dimension. The original function is kept below, renamed export_to_video_o, for comparison.

def export_to_video(
        video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 10
) -> str:
    if is_opencv_available():
        import cv2
    else:
        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))

    if output_video_path is None:
        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name

    # Convert PIL images to numpy arrays if needed
    if isinstance(video_frames[0], PIL.Image.Image):
        video_frames = [np.array(frame) for frame in video_frames]

    # Ensure the frames are in the correct format
    if isinstance(video_frames[0], np.ndarray):
        # Check if frames are 4-dimensional and handle accordingly
        if len(video_frames[0].shape) == 4:
            video_frames = [frame[0] for frame in video_frames]

        # Convert frames to uint8
        video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames]

    # Ensure all frames are in (height, width, channels) format
    h, w, c = video_frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h))

    for frame in video_frames:
        img = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        video_writer.write(img)

    video_writer.release()
    return output_video_path

def export_to_video_o(
    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 10
) -> str:
    if is_opencv_available():
        import cv2
    else:
        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
    if output_video_path is None:
        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name

    if isinstance(video_frames[0], np.ndarray):
        video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames]

    elif isinstance(video_frames[0], PIL.Image.Image):
        video_frames = [np.array(frame) for frame in video_frames]

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    h, w, c = video_frames[0].shape
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h))
    for i in range(len(video_frames)):
        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
        video_writer.write(img)
    return output_video_path
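A quick way to exercise the patched function is to feed it synthetic frames that mimic the four-dimensional case from the traceback; a sanity-check sketch, with an illustrative frame shape that may differ from the real pipeline output:

import numpy as np

# 24 fake float frames, each with a leading batch axis: shape (1, 320, 576, 3)
fake_frames = [np.random.rand(1, 320, 576, 3).astype(np.float32) for _ in range(24)]

# The patched export_to_video should strip the batch axis, convert to uint8, and write an mp4
test_path = export_to_video(fake_frames, output_video_path="patch_check.mp4", fps=10)
print(test_path)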
