A binary classification example written in both TensorFlow 2.0-alpha and PyTorch 1.0

The full code is on my GitHub:

https://github.com/qichenghan666/tf2.0-pytorch1.0-demo

 

Please run the code in a GPU environment. Data download: http://parnec.nuaa.edu.cn/xtan/data/datasets/dataset_B_Eye_Images.rar (the dataset is small, so it downloads quickly). After downloading, place the data in the dataset folder, merge the images from closedLeftEyes and closedRightEyes into closedEyes, and merge openLeftEyes and openRightEyes into openEyes. Then sample some images from each class into the open_test and close_test folders under the same path.
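For reference, here is a minimal sketch of how that folder preparation could be scripted. The directory names match the ones above, but the prepare_dataset helper, the n_test sample size, and the copy-versus-move choice are my own assumptions rather than part of the original post:

import glob
import os
import random
import shutil

def prepare_dataset(root='./dataset', n_test=100):
    # Merge the left/right folders of each class and copy a small random sample
    # into the corresponding *_test folder (illustrative helper, not from the repo).
    merge_map = {'closedEyes': ['closedLeftEyes', 'closedRightEyes'],
                 'openEyes': ['openLeftEyes', 'openRightEyes']}
    test_map = {'closedEyes': 'close_test', 'openEyes': 'open_test'}

    for merged, parts in merge_map.items():
        merged_dir = os.path.join(root, merged)
        os.makedirs(merged_dir, exist_ok=True)
        for part in parts:
            for img in glob.glob(os.path.join(root, part, '*.jpg')):
                # Prefix with the source folder name to avoid filename collisions.
                shutil.copy(img, os.path.join(merged_dir, part + '_' + os.path.basename(img)))

        test_dir = os.path.join(root, test_map[merged])
        os.makedirs(test_dir, exist_ok=True)
        merged_imgs = glob.glob(os.path.join(merged_dir, '*.jpg'))
        for img in random.sample(merged_imgs, min(n_test, len(merged_imgs))):
            shutil.copy(img, test_dir)  # use shutil.move instead to keep train/test disjoint

if __name__ == '__main__':
    prepare_dataset()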

Below is the code written in TensorFlow 2.0 and PyTorch 1.0; the two versions are already very similar.

TF code:

# -*- coding: utf-8 -*-
"""
Created on Sat May 11 12:56:47 2019

@author: hqc
"""

import tensorflow as tf
print(tf.__version__)
print(tf.test.is_gpu_available())
import glob
import os
from tensorflow import keras
from tensorflow.keras import layers, Model, optimizers
from PIL import Image
import numpy as np


def _read_data(dataset_dir):
    data_dir = os.path.join('./dataset/', dataset_dir)
    data_dir = data_dir + '/*.jpg'
    eye_data = glob.glob(data_dir)
    x = []
    for img in eye_data:
        img = Image.open(img)
        img = np.asarray(img)
        x.append(img)
    x = np.array(x)
    x = tf.cast(x, dtype=tf.float32) / 255.
    x = tf.expand_dims(x,axis=3)
    # print(x.shape, y.shape)
    return x

def preprocess(class1_dir, class2_dir):
    x1 = _read_data(class1_dir)
    y1 = tf.ones([x1.shape[0]], dtype=tf.int32)

    x2 = _read_data(class2_dir)
    y2 = tf.zeros([x2.shape[0]], dtype=tf.int32)

    x = tf.concat([x1, x2], axis=0)
    y = tf.concat([y1, y2], axis=0)
    return x, y

train_x, train_y = preprocess('closedEyes', 'openEyes')
test_x, test_y = preprocess('close_test', 'open_test')

# print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)

train_db = tf.data.Dataset.from_tensor_slices((train_x, train_y))
train_db = train_db.shuffle(100000).batch(64)
test_db = tf.data.Dataset.from_tensor_slices((test_x, test_y))
test_db = test_db.batch(64).shuffle(100000)

sample = next(iter(train_db))
# print(sample[0].shape, sample[1].shape)


class Basenet(keras.Model):
    def __init__(self):
        super(Basenet, self).__init__()

        self.conv1 = layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.conv2 = layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.maxpool1 = layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        self.conv3 = layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.conv4 = layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.maxpool2 = layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        self.conv5 = layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.conv6 = layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.maxpool3 = layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')

        self.conv7 = layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.conv8 = layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
        self.maxpool4 = layers.MaxPool2D(pool_size=[4, 4], strides=4, padding='same')

        self.dense1 = layers.Dense(256, activation=tf.nn.relu)
        self.dense2 = layers.Dense(128, activation=tf.nn.relu)
        self.dense3 = layers.Dense(2)

    def call(self, input, training=None):
        x = self.conv1(input)
        x = self.conv2(x)
        x = self.maxpool1(x)

        x = self.conv3(x)
        x = self.conv4(x)
        x = self.maxpool2(x)

        x = self.conv5(x)
        x = self.conv6(x)
        x = self.maxpool3(x)

        x = self.conv7(x)
        x = self.conv8(x)
        x = self.maxpool4(x)

        x = tf.reshape(x, [-1, 512])
        x = self.dense1(x)
        x = self.dense2(x)
        x = self.dense3(x)


        return x


basenet = Basenet()
basenet.build(input_shape=(None, 24, 24, 1))
basenet.summary()

# tensorboard
log_dir = './model/'
summary_writer = tf.summary.create_file_writer(log_dir)


optimizer = optimizers.Adam(learning_rate=1e-4)
variable = basenet.trainable_variables

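# Custom training loop: run the forward pass and loss inside tf.GradientTape,
# then take gradients w.r.t. the trainable variables and apply them with Adam.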
for epoch in range(5):
    for step, (x, y) in enumerate(train_db):
        with tf.GradientTape() as tape:
            logit = basenet(x)


            y_onehot = tf.one_hot(y, depth=2)

            loss = tf.losses.categorical_crossentropy(y_onehot, logit, from_logits=True)
            loss = tf.reduce_mean(loss)

        grads = tape.gradient(loss, variable)
        optimizer.apply_gradients(zip(grads, variable))

        if step % 10 == 0:
            print(epoch, step, 'loss:', float(loss))
            basenet.save_weights('./model/weights.ckpt')
            print('saved weights. ')

    with summary_writer.as_default():
        tf.summary.scalar('loss', float(loss), step=epoch)

    total_num = 0
    total_correct = 0
    for x, y in test_db:
        logit = basenet(x)


        prob = tf.nn.softmax(logit, axis=1)
        pred = tf.argmax(prob, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)

        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)


        total_num += x.shape[0]
        total_correct += int(correct)
    acc = total_correct / total_num
    # print(total_num, total_correct)
    print('epoch:', epoch, 'acc:', acc)


del basenet
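# Rebuild the model from scratch and restore the saved weights to verify the checkpoint.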

basenet = Basenet()
basenet.load_weights('./model/weights.ckpt')
total_num = 0
total_correct = 0
for x, y in test_db:
    logit = basenet(x)

    prob = tf.nn.softmax(logit, axis=1)
    pred = tf.argmax(prob, axis=1)
    pred = tf.cast(pred, dtype=tf.int32)

    correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
    correct = tf.reduce_sum(correct)

    total_num += x.shape[0]
    total_correct += int(correct)
acc = total_correct / total_num
print('acc:', acc)

PyTorch 1.0 code:

import torch
print(torch.__version__)
print(torch.cuda.is_available())
import os
import glob
from PIL import Image
import numpy as np
from torch.utils.data import DataLoader, Dataset
from torch import nn
from torch.nn import functional as F
from torch import optim
from visdom import Visdom

# Custom dataset for binary classification
class myDataset(Dataset):
    def __init__(self, class1_dir, class2_dir):
        self.x, self.y = self.preprocess(class1_dir, class2_dir)

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def __len__(self):
        return self.x.shape[0]

    def _read_data(self, dataset_dir):
        data_dir = os.path.join('./dataset/', dataset_dir)
        data_dir = data_dir + '/*.jpg'
        eye_data = glob.glob(data_dir)
        x = []
        for img in eye_data:
            img = Image.open(img)
            img = np.asarray(img)
            x.append(img)
        x = np.array(x)
        x = torch.from_numpy(x).float() / 255.
        x = torch.unsqueeze(x, dim=1)  # (N, 1, 24, 24): PyTorch convolutions expect channels first
        return x

    # Preprocessing: read both classes and attach the 1/0 labels
    def preprocess(self, class1_dir, class2_dir):
        x1 = self._read_data(class1_dir)
        y1 = torch.ones(x1.shape[0])

        x2 = self._read_data(class2_dir)
        y2 = torch.zeros(x2.shape[0])

        x = torch.cat([x1, x2], dim=0)
        y = torch.cat([y1, y2], dim=0)
        return x, y


train_dataset = myDataset('closedEyes', 'openEyes')
test_dataset = myDataset('close_test', 'open_test')
# print(train_x.type(), train_y.type(), test_x.type(), test_y.type())

train_db = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_db = DataLoader(test_dataset, batch_size=64, shuffle=True)

# x, y = iter(train_db).next()
# print(x.shape, y.shape)

class Basenet(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(Basenet, self).__init__()

        self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=[3, 3], padding=1)
        self.relu1 = nn.ReLU()

        self.conv2 = nn.Conv2d(64, 64, kernel_size=[3, 3], padding=1)
        self.relu2 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)


        self.conv3 = nn.Conv2d(64, 128, kernel_size=[3, 3], padding=1)
        self.relu3 = nn.ReLU()
        self.conv4 = nn.Conv2d(128, 128, kernel_size=[3, 3], padding=1)
        self.relu4 = nn.ReLU()
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv5 = nn.Conv2d(128, 256, kernel_size=[3, 3], padding=1)
        self.relu5 = nn.ReLU()
        self.conv6 = nn.Conv2d(256, 256, kernel_size=[3, 3], padding=1)
        self.relu6 = nn.ReLU()
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv7 = nn.Conv2d(256, 512, kernel_size=[3, 3], padding=1)
        self.relu7 = nn.ReLU()
        self.conv8 = nn.Conv2d(512, 512, kernel_size=[3, 3], padding=1)
        self.relu8 = nn.ReLU()
        self.maxpool4 = nn.MaxPool2d(kernel_size=4, stride=4, ceil_mode=True)  # ceil_mode pools the 3x3 map to 1x1, matching the TF 'same' padding

        self.dense1 = nn.Linear(512, 256)
        self.relu9 = nn.ReLU()
        self.dense2 = nn.Linear(256, 128)
        self.relu10 = nn.ReLU()
        self.dense3 = nn.Linear(128, num_classes)

    def forward(self, inputs):
        x = self.conv1(inputs)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.relu2(x)

        x = self.maxpool1(x)

        x = self.conv3(x)
        x = self.relu3(x)

        x = self.conv4(x)

        x = self.relu4(x)
        x = self.maxpool2(x)

        x = self.conv5(x)
        x = self.relu5(x)
        x = self.conv6(x)
        x = self.relu6(x)
        x = self.maxpool3(x)

        x = self.conv7(x)
        x = self.relu7(x)
        x = self.conv8(x)
        x = self.relu8(x)
        x = self.maxpool4(x)

        x = torch.reshape(x, [-1, 512])
        x = self.dense1(x)
        x = self.relu9(x)
        x = self.dense2(x)
        x = self.relu10(x)
        x = self.dense3(x)


        return x


device = torch.device('cuda')
basenet = Basenet(1, 2).to(device)  # 1 input channel (grayscale 24x24 images), 2 output classes
# x, _ = iter(train_db).next()
# test = basenet(x.to(device))
# print(basenet)

criteon = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(basenet.parameters(), lr=1e-4)

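# Visdom draws the live training-loss curve; start the server first with: python -m visdom.server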
viz = Visdom()
viz.line([0.], [0.], win='train_loss', opts=dict(title='train_loss'))

globals_step = 0
for epoch in range(10):
    # train mode
    basenet.train()
    for step, (x, y) in enumerate(train_db):
        x, y = x.to(device), y.to(device)
        # forward
        logit = basenet(x)
        # loss
        # print(logit.type(), y.type())
        # logit = logit.long()
        loss = criteon(logit, y.long())
        # grads
        optimizer.zero_grad()
        loss.backward()
        # update
        optimizer.step()
        if step % 10 == 0:
            print('epoch:', epoch, 'loss:', loss.item())

        globals_step += 1
        viz.line([loss.item()], [globals_step], win='train_loss', update='append')

    # turn to eval mode
    basenet.eval()
    with torch.no_grad():
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            x, y = x.to(device), y.to(device)
            logit = basenet(x)
            prob = F.softmax(logit, dim=1)
            pred = torch.argmax(prob, dim=1)
            correct = torch.eq(pred, y.long()).sum().item()

            total_num += x.shape[0]
            total_correct +=correct
        acc = total_correct / total_num
        print('epoch:', epoch, 'acc:', acc)

torch.save(basenet.state_dict(), 'eyes.pkl')

del basenet

basenet = Basenet(1, 2).to(device)
basenet.load_state_dict(torch.load('eyes.pkl'))

basenet.eval()
with torch.no_grad():
    total_num = 0
    total_correct = 0
    for x, y in test_db:
        x, y = x.to(device), y.to(device)
        logit = basenet(x)
        prob = F.softmax(logit, dim=1)
        pred = torch.argmax(prob, dim=1)
        correct = torch.eq(pred, y.long()).sum().item()

        total_num += x.shape[0]
        total_correct += correct
    acc = total_correct / total_num
    print('acc:', acc)

params = basenet.state_dict()
for k, v in params.items():
    print(k)  # print the parameter names in the network
print(params['conv1.weight'])  # print conv1's weights
print(params['conv1.bias'])  # print conv1's bias

 
