import os
import clip
import torch
# Load the model
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load('ViT-B/32', device=device)  # downloads the ViT-B/32 weights on first use
text_inputs = clip.tokenize('hello').to(device)  # token ids, shape [1, 77]
with torch.no_grad():
    # Encode the tokens into a text embedding (shape [1, 512] for ViT-B/32)
    text_features = model.encode_text(text_inputs)
# L2-normalize so that dot products between embeddings are cosine similarities
text_features /= text_features.norm(dim=-1, keepdim=True)
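
Because the features are L2-normalized, the dot product of two embeddings is their cosine similarity. Below is a minimal sketch that reuses the model loaded above to compare two text prompts; the prompt strings are illustrative placeholders, not part of the original post.

# Sketch: compare two prompts with the text encoder loaded above.
prompts = ["a photo of a cat", "a photo of a dog"]  # placeholder prompts
tokens = clip.tokenize(prompts).to(device)          # shape [2, 77]

with torch.no_grad():
    features = model.encode_text(tokens)             # shape [2, 512] for ViT-B/32
    features /= features.norm(dim=-1, keepdim=True)  # L2-normalize each row

# Cosine similarity between the two prompts (dot product of unit vectors)
similarity = (features[0] @ features[1]).item()
print(f"cosine similarity: {similarity:.4f}")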