python3 main.py --listen 0.0.0.0 --port 9000 --gpu-only
Total VRAM 24268 MB, total RAM 515638 MB
pytorch version: 2.0.1+cu117
WARNING[XFORMERS]: xFormers can't load C++/CUDA extensions. xFormers was built for:
PyTorch 2.3.0+cu121 with CUDA 1201 (you have 2.0.1+cu117)
Python 3.8.19 (you have 3.8.10)
Please reinstall xformers (see https://github.com/facebookresearch/xformers#installing-xformers)
Memory-efficient attention, SwiGLU, sparse and more won't be available.
Set XFORMERS_MORE_DETAILS=1 for more details
Set vram state to: HIGH_VRAM
Device: cuda:0 NVIDIA GeForce RTX 3090: cudaMallocAsync
VAE dtype: torch.bfloat16
Using pytorch cross attention
Import times for custom nodes:
0.0 seconds: /workspace/code/github/ComfyUI/custom_nodes/websocket_image_save.py
Starting server
To see the GUI go to: http://0.0.0.0:9000
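Note that 0.0.0.0 is the bind address, not an address to browse to; from the same machine the GUI is at http://127.0.0.1:9000. The running server also answers HTTP requests directly. As a minimal sketch (assuming the server started above is reachable on 127.0.0.1:9000), ComfyUI's built-in /system_stats endpoint can be used to confirm the server is up; it echoes the device and VRAM figures from the startup log:

```python
import json
import urllib.request

# Assumption: the ComfyUI server launched above is reachable here.
URL = "http://127.0.0.1:9000/system_stats"

# Fetch the system stats JSON from the running server.
with urllib.request.urlopen(URL) as resp:
    stats = json.load(resp)

# Each entry in "devices" describes one compute device and its VRAM.
for dev in stats["devices"]:
    print(dev["name"], "-", dev["vram_total"], "bytes VRAM total")
```

The UI-format workflow JSON that drives this run follows; it is what ComfyUI's Save button produces, and the graph can be restored by dragging the saved file onto the canvas or using the Load button. (The /prompt API endpoint expects the separate API format from "Save (API Format)", not this UI format.)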
{"last_node_id":21,"last_link_id":28,"nodes":[{"id":14,"type":"VideoTriangleCFGGuidance","pos":[390,90],"size":{"0":240,"1":60},"flags":{},"order":2,"mode":0,"inputs":[{"name":"model","type":"MODEL","link":23,"slot_index":0}],"outputs":[{"name":"MODEL","type":"MODEL","links":[16],"shape":3,"slot_index":0}],"properties":{"Node name for S&R":"VideoTriangleCFGGuidance"},"widgets_values":[1]},{"id":10,"type":"SV3D_Conditioning","pos":[380,240],"size":{"0":250,"1":170},"flags":{},"order":3,"mode":0,"inputs":[{"name":"clip_vision","type":"CLIP_VISION","link":24,"slot_index":0},{"name":"init_image","type":"IMAGE","link":13,"slot_index":1},{"name":"vae","type":"VAE","link":25,"slot_index":2}],"outputs":[{"name":"positive","type":"CONDITIONING","links":[10],"shape":3,"slot_index":0},{"name":"negative","type":"CONDITIONING","links":[11],"shape":3,"slot_index":1},{"name":"latent","type":"LATENT","links":[18],"shape":3,"slot_index":2}],"properties":{"Node name for S&R":"SV3D_Conditioning"},"widgets_values":[576,576,21,0]},{"id":8,"type":"VAEDecode","pos":[888,-109],"size":{"0":210,"1":46},"flags":{},"order":5,"mode":0,"inputs":[{"name":"samples","type":"LATENT","link":7},{"name":"vae","type":"VAE","link":26}],"outputs":[{"name":"IMAGE","type":"IMAGE","links":[27,28],"slot_index":0}],"properties":{"Node name for S&R":"VAEDecode"}},{"id":12,"type":"LoadImage","pos":[-23,244],"size":{"0":315,"1":314},"flags":{},"order":0,"mode":0,"outputs":[{"name":"IMAGE","type":"IMAGE","links":[13],"shape":3},{"name":"MASK","type":"MASK","links":null,"shape":3}],"properties":{"Node name for S&R":"LoadImage"},"widgets_values":["captured_p (1).webp","image"]},{"id":20,"type":"ImageOnlyCheckpointLoader","pos":[-38,-68],"size":{"0":369.6000061035156,"1":98},"flags":{},"order":1,"mode":0,"outputs":[{"name":"MODEL","type":"MODEL","links":[23],"shape":3,"slot_index":0},{"name":"CLIP_VISION","type":"CLIP_VISION","links":[24],"shape":3,"slot_index":1},{"name":"VAE","type":"VAE","links":[25,26],"shape":3,"slot_index":2}],"properties":{"Node name for S&R":"ImageOnlyCheckpointLoader"},"widgets_values":["sv3d_p.safetensors"]},{"id":3,"type":"KSampler","pos":[670,90],"size":{"0":315,"1":262},"flags":{},"order":4,"mode":0,"inputs":[{"name":"model","type":"MODEL","link":16},{"name":"positive","type":"CONDITIONING","link":10},{"name":"negative","type":"CONDITIONING","link":11},{"name":"latent_image","type":"LATENT","link":18}],"outputs":[{"name":"LATENT","type":"LATENT","links":[7],"slot_index":0}],"properties":{"Node name for S&R":"KSampler"},"widgets_values":[523625528279418,"fixed",20,8,"dpmpp_2m","karras",1]},{"id":9,"type":"SaveImage","pos":[1688,-145],"size":{"0":291.955078125,"1":422.0332946777344},"flags":{},"order":6,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":27}],"properties":{},"widgets_values":["ComfyUI"]},{"id":21,"type":"SaveAnimatedWEBP","pos":[1082,158],"size":{"0":560,"1":530},"flags":{},"order":7,"mode":0,"inputs":[{"name":"images","type":"IMAGE","link":28}],"properties":{"Node name for S&R":"SaveAnimatedWEBP"},"widgets_values":["ComfyUI",6,true,80,"default",null]}],"links":[[7,3,0,8,0,"LATENT"],[10,10,0,3,1,"CONDITIONING"],[11,10,1,3,2,"CONDITIONING"],[13,12,0,10,1,"IMAGE"],[16,14,0,3,0,"MODEL"],[18,10,2,3,3,"LATENT"],[23,20,0,14,0,"MODEL"],[24,20,1,10,0,"CLIP_VISION"],[25,20,2,10,2,"VAE"],[26,20,2,8,1,"VAE"],[27,8,0,9,0,"IMAGE"],[28,8,0,21,0,"IMAGE"]],"groups":[],"config":{},"extra":{},"version":0.4}