PyTorch Practice 2

#coding=utf-8
import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils import data
import os
from PIL import Image
import torch.nn.functional as F

def default_loader(path):
    return Image.open(path).convert('RGB')

class MyDataset(data.Dataset):
    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
        imgs = []
        with open(txt, 'r') as fh:
            for line in fh:
                words = line.strip().split()
                imgs.append((words[0], int(words[1])))
        self.imgs = imgs
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]  # fn is the image path, label the integer class
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)
root="./data"
train_data=MyDataset(txt=root+'train.txt', transform=transforms.ToTensor())
test_data=MyDataset(txt=root+'test.txt', transform=transforms.ToTensor())
train_loader = data.DataLoader(dataset=train_data, batch_size=64, shuffle=True,num_workers=2)
test_loader = data.DataLoader(dataset=test_data, batch_size=64,num_workers=2)
#print train_data[0][1]
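For reference, MyDataset expects each line of the index file to hold an image path and an integer label separated by whitespace. A hypothetical ./data/train.txt (paths and labels invented for illustration):

./data/images/0001.png 5
./data/images/0002.png 0
./data/images/0003.png 4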

num_inputs = 28 * 28
num_outputs = 10
# parameters for the hand-rolled linear model below, as torch tensors
# (the original used numpy arrays, which torch.mm cannot consume)
w = torch.randn(num_inputs, num_outputs)  # ~N(0, 1), matching np.random.normal(scale=1.0)
b = torch.zeros(num_outputs)
para = [w, b]

def softmax(X):
    exp = torch.exp(X)
    add = exp.sum(dim=1, keepdim=True)  # row-wise normalizer
    return exp / add                    # broadcasting divides each row by its sum

# X = torch.randn((3, 2))
# X_prob = softmax(X)
# print(X_prob)
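One caveat with this naive softmax: exponentiating large logits overflows float32 and yields inf/nan. A standard remedy (my addition, not in the original post) is to subtract each row's maximum before exponentiating, which leaves the result mathematically unchanged:

def stable_softmax(X):
    # softmax(x) == softmax(x - c) for any constant c, so subtracting
    # the per-row max keeps exp() from overflowing float32
    X = X - X.max(dim=1, keepdim=True)[0]
    exp = torch.exp(X)
    return exp / exp.sum(dim=1, keepdim=True)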

def net1(X):
    # torch.dot only handles 1-D vectors; a batched linear layer needs torch.mm
    return softmax(torch.mm(X.view((-1, num_inputs)), w) + b)

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(3, 32, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)
        )
        self.conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 64, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)
        )
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(64 * 3 * 3, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 10))
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = x.view(x.size(0), -1)  # flatten the conv output to (batch_size, 64 * 3 * 3); x.size(0) is the batch size
        out = self.dense(x)
        return out
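The 64 * 3 * 3 input size of the first Linear layer assumes 28x28 inputs (consistent with num_inputs = 28*28 above): the three MaxPool2d(2) stages take the spatial size 28 → 14 → 7 → 3, with the last pool flooring 7/2. A quick shape check (my addition) confirms it:

# feed a fake batch through the conv stack to verify the flattened size
dummy = torch.zeros(1, 3, 28, 28)            # one fake RGB image
m = Net()
feats = m.conv3(m.conv2(m.conv1(dummy)))
print(feats.shape)                           # torch.Size([1, 64, 3, 3]) -> 64 * 3 * 3 = 576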

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)  # a bare .cuda() crashes on CPU-only machines
#print(model)
optimizer = torch.optim.Adam(model.parameters(),lr=0.001)
loss_func = nn.CrossEntropyLoss()
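Note that nn.CrossEntropyLoss applies log-softmax internally, which is why forward ends with a plain Linear layer and no softmax. A quick sketch of the equivalence (tensors invented for illustration):

logits = torch.randn(4, 10)                  # fake batch: 4 samples, 10 classes
targets = torch.tensor([1, 0, 3, 9])         # fake labels
a = loss_func(logits, targets)
b = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(a, b))                  # True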

for epoch in range(1):
    train_loss = 0.
    train_acc = 0.
    for x, y in train_loader:
        x = x.to(device)
        y = y.to(device)
        out_put = model(x)
        loss = loss_func(out_put, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # loss is the batch mean, so weight it by the batch size to make the
        # per-sample average below exact; loss.item() replaces the
        # long-deprecated loss.data[0]
        train_loss += loss.item() * y.size(0)
        pred = torch.max(out_put, 1)[1]  # index of the max logit per row
        train_correct = (pred == y).sum()
        train_acc += train_correct.item()
    print('Train Loss: {:.6f}, Acc: {:.6f}'.format(
        train_loss / len(train_data), train_acc / len(train_data)))

# evaluation --------------------------------
'''
model.eval()
eval_loss = 0.
eval_acc = 0.
with torch.no_grad():  # replaces the long-deprecated Variable(..., volatile=True)
    for batch_x, batch_y in test_loader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        out = model(batch_x)
        loss = loss_func(out, batch_y)
        eval_loss += loss.item() * batch_y.size(0)
        pred = torch.max(out, 1)[1]
        num_correct = (pred == batch_y).sum()
        eval_acc += num_correct.item()
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(test_data), eval_acc / len(test_data)))
'''

 
