Reading an image and a .pth parameter file for prediction (VGG19 and DenseNet)

Shell commands for compressing a large file before transfer:

sudo apt-get install p7zip-full
7z a vgg19bn.7z vgg19_bn.pth
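To unpack on the receiving machine (assuming the same archive name as above):

7z x vgg19bn.7z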
# In[]
import torch,torchvision
import torchvision.transforms as transform
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
# In[1]:
classes=['0', '10', '102', '13', '14', '15', '16', '17',
 '18', '19', '2', '20', '3', '4', '5', '6', '7', '8', '9']
# **Normalize and load the data**
# ToTensor converts a [0, 255] RGB image (H x W x C) to a float tensor (C x H x W) in [0.0, 1.0]
# Normalize recenters each channel (here mapping [0, 1] to [-1, 1]), which speeds up convergence
transformer = transform.Compose([
                                transform.Resize(100),  # scale the shorter side to 100 px; the other side keeps the aspect ratio
                                transform.ToTensor(),
                                transform.Normalize([0.5, 0.5, 0.5],
                                                    [0.5, 0.5, 0.5])
                                ])
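A minimal sketch of what this pipeline produces, using a hypothetical solid-color PIL image (only the output shape and value range matter here):

# In[]
from PIL import Image

dummy = Image.new('RGB', (300, 200), color=(128, 64, 32))   # hypothetical 300x200 test image
t = transformer(dummy)
print(t.shape)                            # torch.Size([3, 100, 150]): shorter side resized to 100, aspect ratio kept
print(t.min().item(), t.max().item())     # values lie in [-1, 1] after Normalize(0.5, 0.5)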

# In[2]
from torchvision.datasets import ImageFolder
# path = './datadev/'
# training = ImageFolder(path+'/train', transform=transformer)
# testset = ImageFolder(path+'/test/', 
                        #    transform=transformer)
# In[2] 
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')  # run on GPU if available
vgg19M = torchvision.models.vgg19_bn(pretrained=False).to(device)        # build the network with random weights
pre_dict = torch.load('vgg19_bn.pth', map_location=device)

# Replace the final fully connected layer so out_features matches the number of classes
vgg19M.classifier[6] = nn.Linear(4096, len(classes), bias=True)
# Load the pretrained network weights
model_dict = vgg19M.state_dict()
# keep every layer except the mismatched one (dropping the linear layer is bound to hurt results)
# pre_dict = {k: v for k, v in pre_dict.items() if (k in model_dict and 'classifier.6' not in k)}
# update model_dict with the weights from pre_dict
model_dict.update(pre_dict)
vgg19M.load_state_dict(model_dict)
print(vgg19M)
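If vgg19_bn.pth were a stock ImageNet checkpoint (with a 1000-way classifier.6) rather than a fine-tuned one, the update above would fail with a shape mismatch; in that case a common workaround, sketched here, is a shape-aware version of the commented-out filter:

# In[]
# Hypothetical variant: keep only checkpoint entries whose key and tensor shape
# both match the current model, then load non-strictly.
filtered = {k: v for k, v in pre_dict.items()
            if k in model_dict and v.shape == model_dict[k].shape}
model_dict.update(filtered)
vgg19M.load_state_dict(model_dict, strict=False)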

# In[4]:
# **Let's write the accuracy function so that we don't have to write it several times**
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)  # index of the highest score = predicted class
    return torch.tensor(torch.sum(preds == labels).item() / len(preds)), preds
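A quick sanity check of accuracy with made-up logits and labels (hypothetical values, just to show the return format):

# In[]
fake_out = torch.tensor([[0.1, 2.0, 0.3],
                         [1.5, 0.2, 0.1]])   # 2 samples, 3 classes
fake_lab = torch.tensor([1, 2])
acc, preds = accuracy(fake_out, fake_lab)
print(acc, preds)   # tensor(0.5000) tensor([1, 0]): only the first prediction is correct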

# In[5]:
# **Let's write a model class that wraps the already trained model(s)**
class Ensemble(nn.Module):
    def __init__(self, device):
        super(Ensemble,self).__init__()
        # you should use nn.ModuleList. Optimizer doesn't detect python list as parameters
        self.models = nn.ModuleList([vgg19M])
        
    def forward(self, x):
        # forward through each wrapped model and sum the logits
        output = torch.zeros([x.size(0), len(classes)]).to(device)
        for model in self.models:
            output += model(x)
        return output
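If more than one trained model were registered, as the comment above suggests, averaging rather than summing keeps the output on the scale of a single model; a sketch of that hypothetical variant:

# In[]
class AveragingEnsemble(nn.Module):
    # Hypothetical variant: averages the logits of any number of wrapped models.
    def __init__(self, models):
        super().__init__()
        self.models = nn.ModuleList(models)

    def forward(self, x):
        outputs = [m(x) for m in self.models]           # each output is [batch, num_classes]
        return torch.stack(outputs, dim=0).mean(dim=0)  # element-wise mean over models

# e.g. AveragingEnsemble([vgg19M]) behaves like Ensemble above when only one model is wrapped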

# In[6]:
model = Ensemble(device)
# In[7]:
def validation_step(batch):
    images, labels = batch
    images, labels = images.to(device), labels.to(device)
    out = model(images)                    # forward pass through the ensemble
    loss = F.cross_entropy(out, labels)    # batch loss
    acc, preds = accuracy(out, labels)     # batch accuracy and per-sample predictions
    return {'val_loss': loss.detach(), 'val_acc': acc.detach(),
            'preds': preds.detach(), 'labels': labels.detach()}

# In[10]:
def test_prediction(outputs):
    batch_losses = [x['val_loss'] for x in outputs]
    epoch_loss = torch.stack(batch_losses).mean()    # mean loss over all batches
    batch_accs = [x['val_acc'] for x in outputs]
    epoch_acc = torch.stack(batch_accs).mean()       # mean accuracy over all batches
    # combine predictions
    batch_preds = [pred for x in outputs for pred in x['preds'].tolist()]
    # combine labels
    batch_labels = [lab for x in outputs for lab in x['labels'].tolist()]

    return {'test_loss': epoch_loss.item(), 'test_acc': epoch_acc.item(),
            'test_preds': batch_preds, 'test_labels': batch_labels}


# In[11]:
@torch.no_grad()
def test_predict(model, test_loader):
    model.eval()
    # perform testing for each batch
    outputs = [validation_step(batch) for batch in test_loader] 
    results = test_prediction(outputs)                          
    print('test_loss: {:.4f}, test_acc: {:.4f}'
          .format(results['test_loss'], results['test_acc']))
    return results['test_preds'], results['test_labels']

# In[12]
from torch.utils.data import Dataset, DataLoader
vgg19M.to(device)
model.eval()
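test_predict is defined above but never called in this first script; if the commented-out testset were actually built, a batch evaluation would look like the sketch below (the './datadev/test/' path is taken from the commented-out cell above and is an assumption here):

# In[]
testset = ImageFolder('./datadev/test/', transform=transformer)   # assumed: one sub-folder per class
test_dl = DataLoader(testset, batch_size=64)
preds, labels = test_predict(model, test_dl)   # validation_step uses the global `model` (the ensemble)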
# In[13]:
def norm_out(img):
    # undo Normalize(mean=0.5, std=0.5) and move channels last so plt.imshow can display it
    img = img.permute(1, 2, 0)
    mean = torch.FloatTensor([0.5, 0.5, 0.5])
    std = torch.FloatTensor([0.5, 0.5, 0.5])
    img = img * std + mean
    return np.clip(img, 0, 1)

# In[15]
imgpath = "4.jpg"#你的图片路径
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
# i = np.random.randint(0, high=len(testset))

image = Image.open(imgpath).convert('RGB')   # force 3 channels so Normalize gets RGB input
img = transformer(image)

m = nn.Softmax(dim=1)
percent = m(model(img.to(device).unsqueeze(0)))
predmax3percent = torch.sort(percent[0])[0][-3:]   # three highest probabilities, ascending
predmax3inds = torch.sort(percent[0])[1][-3:]      # their class indices
class_name = np.array([classes[predmax3inds[-3]], classes[predmax3inds[-2]], classes[predmax3inds[-1]]])
plt.imshow(norm_out(img))
print(int(classes[predmax3inds[-1]]), round(predmax3percent[-1].item(), 5))   # top-1 class id and its probability
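The same top-3 lookup can be written more directly with torch.topk; a minimal equivalent sketch:

# In[]
top_p, top_i = torch.topk(percent[0], k=3)   # probabilities and indices, highest first
print([(classes[i], round(p.item(), 5)) for p, i in zip(top_p, top_i)])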

# %%

Reference: PyTorch Study Notes (3): Twenty-two transforms methods (CSDN blog)

# In[]
import torch,torchvision
import torchvision.transforms as transform
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
# In[1]:
imgpath = "3.jpg"#你的图片路径
# **Normalize and load the data**
# ToTensor converts a [0, 255] RGB image (H x W x C) to a float tensor (C x H x W) in [0.0, 1.0]
# Normalize recenters each channel (here mapping [0, 1] to [-1, 1]), which speeds up convergence
transformer = transform.Compose([
                                transform.Resize(120),
                                transform.ToTensor(),
                                transform.Normalize([0.5, 0.5, 0.5],
                                                    [0.5, 0.5, 0.5])
                                ])

# In[2]
from torchvision.datasets import ImageFolder
path = './data/'
training = ImageFolder(path + 'train', transform=transformer)
testset = ImageFolder(path + 'test/', transform=transformer)
# In[2] 
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')  # run on GPU if available
densenetM = torchvision.models.densenet121(pretrained=False).to(device)  # build the network with random weights
pre_dict = torch.load('densenet121_1.pth', map_location=device)
# print(pre_dict)  # the checkpoint is saved as an ordinary state_dict (a dict of tensors)
# Replace the final fully connected layer so out_features matches the number of classes
densenetM.classifier = nn.Linear(1024, len(testset.classes), bias=True)
# Load the pretrained network weights
model_dict = densenetM.state_dict()
# keep every layer except the mismatched one (dropping the linear layer is bound to hurt results)
# pre_dict = {k: v for k, v in pre_dict.items() if (k in model_dict and 'classifier' not in k)}
# update model_dict with the weights from pre_dict
model_dict.update(pre_dict)
densenetM.load_state_dict(model_dict)
print(densenetM)

# In[4]:
# **Let's write the accuracy function so that we don't have to write it several times**
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)  # index of the highest score = predicted class
    return torch.tensor(torch.sum(preds == labels).item() / len(preds)), preds

# In[5]:
# **Let's write a model class that wraps the already trained model(s)**
class Ensemble(nn.Module):
    def __init__(self, device):
        super(Ensemble,self).__init__()
        # you should use nn.ModuleList. Optimizer doesn't detect python list as parameters
        self.models = nn.ModuleList([densenetM])
        
    def forward(self, x):
        # forward through each wrapped model and sum the logits
        output = torch.zeros([x.size(0), len(testset.classes)]).to(device)
        for model in self.models:
            output += model(x)
        return output

# In[6]:
model = Ensemble(device)
# In[7]:
def validation_step(batch):
    images, labels = batch
    images, labels = images.to(device), labels.to(device)
    out = model(images)                    # forward pass through the ensemble
    loss = F.cross_entropy(out, labels)    # batch loss
    acc, preds = accuracy(out, labels)     # batch accuracy and per-sample predictions
    return {'val_loss': loss.detach(), 'val_acc': acc.detach(),
            'preds': preds.detach(), 'labels': labels.detach()}

# In[10]:
def test_prediction(outputs):
    batch_losses = [x['val_loss'] for x in outputs]
    epoch_loss = torch.stack(batch_losses).mean()    # mean loss over all batches
    batch_accs = [x['val_acc'] for x in outputs]
    epoch_acc = torch.stack(batch_accs).mean()       # mean accuracy over all batches
    # combine predictions
    batch_preds = [pred for x in outputs for pred in x['preds'].tolist()]
    # combine labels
    batch_labels = [lab for x in outputs for lab in x['labels'].tolist()]

    return {'test_loss': epoch_loss.item(), 'test_acc': epoch_acc.item(),
            'test_preds': batch_preds, 'test_labels': batch_labels}


# In[11]:
@torch.no_grad()
def test_predict(model, test_loader):
    model.eval()
    # perform testing for each batch
    outputs = [validation_step(batch) for batch in test_loader] 
    results = test_prediction(outputs)                          
    print('test_loss: {:.4f}, test_acc: {:.4f}'
          .format(results['test_loss'], results['test_acc']))
    return results['test_preds'], results['test_labels']

# In[12]
from torch.utils.data import Dataset, DataLoader
test_dl = DataLoader(testset, batch_size=256)
densenetM.to(device)
preds, labels = test_predict(model, test_dl)   # validation_step uses the global `model` (the ensemble), so pass it here too
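With the flattened predictions and labels returned above, a per-class accuracy breakdown is a natural follow-up; a minimal sketch using only the standard library:

# In[]
from collections import Counter

correct, total = Counter(), Counter()
for p, l in zip(preds, labels):
    total[l] += 1
    correct[l] += int(p == l)
for idx in sorted(total):
    print(testset.classes[idx], '{:.2%}'.format(correct[idx] / total[idx]))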

# In[13]:
def norm_out(img):
    img = img.permute(1,2,0)
    mean = torch.FloatTensor([0.5, 0.5, 0.5])
    std = torch.FloatTensor([0.5, 0.5, 0.5])
    img = img*std + mean
    return np.clip(img,0,1)

# In[15]
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
# i = np.random.randint(0, high=len(testset))  # unused leftover: index for picking a random test image

image = Image.open(imgpath).convert('RGB')   # force 3 channels so Normalize gets RGB input
img = transformer(image)
# training.classes=['0','10','102','13','14','15','16','17','18','19','2','21','3','4','5','6','7','8','9']
m = nn.Softmax(dim=1)
percent = m(model(img.to(device).unsqueeze(0)))
predmax3percent = torch.sort(percent[0])[0][-3:]   # three highest probabilities, ascending
predmax3inds = torch.sort(percent[0])[1][-3:]      # their class indices
classes = np.array([testset.classes[predmax3inds[-3]],
                    testset.classes[predmax3inds[-2]],
                    testset.classes[predmax3inds[-1]]])
class_name = testset.classes
plt.imshow(norm_out(img))
print(int(testset.classes[predmax3inds[-1]]), round(predmax3percent[-1].item(), 5))   # top-1 class id and its probability

# %%
