AnimateDiff can also be used to generate visually similar videos, or to edit the style, characters, background, and more starting from an initial video, letting you explore creative possibilities seamlessly.
```python
import os

os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

import imageio
import requests
import torch
from io import BytesIO

from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif
from PIL import Image

# Load the MotionAdapter
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)

# Load an SD 1.5 based finetuned model
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)
pipe.scheduler = scheduler

# Enable memory savings
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

# Helper function to load a video as a list of PIL images
def load_video(file_path: str):
    images = []

    if file_path.startswith(("http://", "https://")):
        # If file_path is a URL, download the video into memory
        response = requests.get(file_path)
        response.raise_for_status()
        content = BytesIO(response.content)
        vid = imageio.get_reader(content)
    else:
        # Otherwise assume it is a local file path
        vid = imageio.get_reader(file_path)

    for frame in vid:
        pil_image = Image.fromarray(frame)
        images.append(pil_image)

    return images

video = load_video("https://hf-mirror.com/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif")

output = pipe(
    video=video,
    prompt="panda playing a guitar, on a boat, in the ocean, high quality",
    negative_prompt="bad quality, worse quality",
    guidance_scale=7.5,
    num_inference_steps=25,
    strength=0.5,
    generator=torch.Generator("cpu").manual_seed(42),
)
frames = output.frames[0]
export_to_gif(frames, "animation03.gif")
```
The original input GIF is shown below:
The output is shown below:
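The `strength` parameter controls how far the result is allowed to drift from the input video: lower values preserve the source motion and layout, while higher values give the prompt more influence over the final appearance. The following is a minimal sketch that reuses the `pipe` and `video` defined above and re-runs the call with a higher strength, exporting the result as an MP4 via Diffusers' `export_to_video` helper. The value `0.8` and the output filename are illustrative assumptions, not values from the original example.

```python
from diffusers.utils import export_to_video

# Higher strength -> more of the original video is noised away,
# so the prompt drives more of the final appearance.
output_strong = pipe(
    video=video,
    prompt="panda playing a guitar, on a boat, in the ocean, high quality",
    negative_prompt="bad quality, worse quality",
    guidance_scale=7.5,
    num_inference_steps=25,
    strength=0.8,  # assumption: chosen for illustration; try values between 0.3 and 0.9
    generator=torch.Generator("cpu").manual_seed(42),
)
export_to_video(output_strong.frames[0], "animation03_strong.mp4")
```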