一、Preface
Because network bandwidth is not always reliable, I wanted to save a Transformers model to local disk and then load it from that local path.
二、Code
1、Download the model and save it locally
from transformers import AutoModel, AutoTokenizer
# Name (or local path) of the pretrained model
model_name = "openai/clip-vit-base-patch16"
# Download and load the model and tokenizer via AutoModel / AutoTokenizer
model = AutoModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Save the model weights and tokenizer to a local folder
output_folder = "/root/model"  # replace with the folder you want to save to
model.save_pretrained(output_folder)
tokenizer.save_pretrained(output_folder)
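Loading back from the saved folder is symmetric: point from_pretrained at the local directory instead of the Hub name. A minimal sketch, assuming the same /root/model path as above (local_files_only=True simply forbids any network access, so it fails fast if a file is missing):
from transformers import AutoModel, AutoTokenizer
# Load the model and tokenizer back from the locally saved folder
local_folder = "/root/model"
model = AutoModel.from_pretrained(local_folder, local_files_only=True)
tokenizer = AutoTokenizer.from_pretrained(local_folder, local_files_only=True)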