PyQt5 | Handwritten Digit Recognition in Practice (Layers) | CIFAR and VGG in Practice (Convolutional Neural Networks)

2021-04-25
Study Report

1. PyQt5 graphics and visual-effect improvements

font = QtGui.QFont()
font.setFamily("Arial")  # any installed font family can be used here
font.setPointSize(18)
label_4.setFont(font)
button.setFont(font)
window.setWindowIcon(QIcon('12.png'))  # window icon

window.setStyleSheet("#MainWindow{border-image:url(09.jpg);}")  # background image
window.setWindowOpacity(0.9)  # window opacity
window.setStyleSheet("#MainWindow{background-color: yellow}")  # background color (note: this second setStyleSheet call replaces the border-image style set above)

For details, see:

https://blog.csdn.net/xqlily/article/details/97467606?utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-5.control&dist_request_id=&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7EBlogCommendFromBaidu%7Edefault-5.control


https://blog.csdn.net/jia666666/article/details/81874045


https://blog.csdn.net/weixin_43283397/article/details/105995328?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522161892346616780261983965%2522%252C%2522scm%2522%253A%252220140713.130102334…%2522%257D&request_id=161892346616780261983965&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2allfirst_rank_v2~rank_v29-16-105995328.pc_search_result_hbase_insert&utm_term=pyqt%E8%AE%BE%E7%BD%AE%E5%AD%97%E4%BD%93%E5%A4%A7%E5%B0%8F

2. Handwritten Digit Recognition in Practice (Layers)
Steps: load the dataset, build the train and test datasets, get an iterator, define a Sequential model, iterate over the dataset, compute gradients, and test.


import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics


def preprocess(x, y):

    # convert to tensors
    # x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
    # y = tf.convert_to_tensor(y, dtype=tf.int32)

    x = tf.cast(x, dtype=tf.float32) / 255.  # tf.cast() converts the dtype; scale pixels to [0, 1]
    y = tf.cast(y, dtype=tf.int32)
    return x, y


(x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()  # load the Fashion-MNIST train and test splits (use datasets.mnist for handwritten digits)
print(x.shape, y.shape)
# print(x_test.shape)

batchsz = 128
db = tf.data.Dataset.from_tensor_slices((x, y))  # build the training dataset so it can be iterated in batches
db = db.map(preprocess).shuffle(10000).batch(batchsz)  # preprocess, shuffle, and batch

db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))  # build the test dataset
db_test = db_test.map(preprocess).batch(batchsz)  # preprocess and batch; the test set is evaluated in one pass, so no shuffle is needed

db_iter = iter(db)
sample = next(db_iter)
print('batch:', sample[0].shape, sample[1].shape)

model = Sequential([
    layers.Dense(256, activation=tf.nn.relu),  # [b, 784] => [b, 256]
    layers.Dense(128, activation=tf.nn.relu),  # [b, 256] => [b, 128]
    layers.Dense(64, activation=tf.nn.relu),   # [b, 128] => [b, 64]
    layers.Dense(32, activation=tf.nn.relu),   # [b, 64]  => [b, 32]
    layers.Dense(10)                           # [b, 32]  => [b, 10]; 330 params = 32*10 weights + 10 biases
])
model.build(input_shape=[None, 28*28])
model.summary()  # print the network for debugging

optimizer = optimizers.Adam(learning_rate=1e-3)  # optimizer; gradient-descent-style update w = w - lr * grad (Adam adds adaptive moments)

def main():

    for epoch in range(30):

        for step, (x, y) in enumerate(db):  # each epoch is one full pass over the dataset

            # x: [b, 28, 28] => [b, 784]
            # y: [b]
            x = tf.reshape(x, [-1, 28*28])

            with tf.GradientTape() as tape:  # record operations for gradient computation

                # [b, 784] => [b, 10]
                logits = model(x)  # forward pass
                y_onehot = tf.one_hot(y, depth=10)
                loss_mse = tf.reduce_mean(tf.losses.MSE(y_onehot, logits))  # mean squared error (for monitoring only)
                loss_ce = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss_ce = tf.reduce_mean(loss_ce)


            grads = tape.gradient(loss_ce, model.trainable_variables)  # compute gradients w.r.t. w1, w2, w3, ...
            optimizer.apply_gradients(zip(grads, model.trainable_variables))  # apply the update

            # print progress
            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss_ce), float(loss_mse))

        # test
        total_correct = 0
        total_num = 0
        for x, y in db_test:

            # x: [b, 28, 28] => [b, 784]
            # y: [b]
            x = tf.reshape(x, [-1, 28 * 28])
            # no gradients are needed at test time
            # [b, 784] => [b, 10]
            logits = model(x)  # forward pass
            # logits => prob
            prob = tf.nn.softmax(logits, axis=1)  # convert logits to probabilities in [0, 1]
            # [b, 10] => [b]
            pred = tf.argmax(prob, axis=1)  # index of the largest probability
            pred = tf.cast(pred, dtype=tf.int32)
            # pred: [b]
            # y: [b]
            # correct = [b], True: equal, False: not equal
            correct = tf.equal(pred, y)
            correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32))  # count the correct predictions (True=1, False=0)

            total_correct += int(correct)  # convert the tensor to a Python int
            total_num += x.shape[0]

        acc = total_correct / total_num
        print(epoch, 'test acc:', acc)

if __name__== '__main__':
    main()
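For comparison, the same Sequential model can also be trained with Keras's built-in training loop instead of the manual GradientTape loop; a minimal sketch, assuming the db, db_test, and model objects defined above (the db_flat names are just placeholders):

# flatten the images inside the dataset pipeline, since the model expects [b, 784]
db_flat = db.map(lambda x, y: (tf.reshape(x, [-1, 28 * 28]), y))
db_test_flat = db_test.map(lambda x, y: (tf.reshape(x, [-1, 28 * 28]), y))

model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(db_flat, epochs=30, validation_data=db_test_flat)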

3. CIFAR and VGG in Practice (Convolutional Neural Networks)


import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential

tf.random.set_seed(2345)

conv_layers = [ # 5 units of conv + max pooling
    # unit 1
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 2
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 3
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 4
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 5
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.Conv2D(512, kernel_size=[3, 3], padding="same", activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same')
]


def preprocess(x, y):
    # [0~1]
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x,y


(x, y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y, axis=1)            # [b, 1] => [b]
y_test = tf.squeeze(y_test, axis=1)  # [b, 1] => [b]
print(x.shape, y.shape, x_test.shape, y_test.shape)


train_db = tf.data.Dataset.from_tensor_slices((x,y))
train_db = train_db.shuffle(1000).map(preprocess).batch(128)

test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test))
test_db = test_db.map(preprocess).batch(64)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))


def main():
    # build the convolutional sub-network
    # [b, 32, 32, 3] => [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)
    # conv_net.build(input_shape=[None, 32, 32, 3])
    # x = tf.random.normal([4, 32, 32, 3])
    # out = conv_net(x)
    # print(out.shape)

    # fully connected sub-network
    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100, activation=None),
    ])

    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
    optimizer = optimizers.Adam(learning_rate=1e-4)

    # [1, 2] + [3, 4] => [1, 2, 3, 4]
    variables = conv_net.trainable_variables + fc_net.trainable_variables

    for epoch in range(50):

        for step, (x,y) in enumerate(train_db):

            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # flatten, => [b, 512]
                out = tf.reshape(out, [-1, 512])
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(grads, variables))

            if step %100 == 0:
                print(epoch, step, 'loss:', float(loss))



        total_num = 0
        total_correct = 0
        for x,y in test_db:

            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)



if __name__ == '__main__':
    main()

Other:
OpenCV: basic preparation before image processing
https://blog.csdn.net/hfut_why/article/details/84673348?ops_request_misc=&request_id=&biz_id=102&utm_term=%E5%9B%BE%E5%83%8F%E5%89%8D%E5%A4%84%E7%90%86&utm_medium=distribute.pc_search_result.none-task-blog-2allsobaiduweb~default-4-84673348.pc_search_result_hbase_insert

Image processing:
1. cv2.imread() reads an image; the data comes back in BGR format with values in 0~255.
Note in particular that the image is read as BGR, not the more familiar RGB, so the colors will look wrong if the channels are not converted.

2. cv2.cvtColor(p1, p2) is the color-space conversion function: p1 is the image to convert, p2 is the target format (a short sketch follows below).
cv2.COLOR_BGR2RGB converts BGR to RGB.
cv2.COLOR_BGR2GRAY converts BGR to grayscale.
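A minimal sketch of both conversions (the file name test.jpg is just a placeholder):

import cv2

img_bgr = cv2.imread("test.jpg")                       # HWC, BGR, values in 0~255
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)     # BGR => RGB
img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)   # BGR => single-channel grayscale
print(img_bgr.shape, img_rgb.shape, img_gray.shape)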

For details, see:
https://blog.csdn.net/zhang_cherry/article/details/88951259?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522161892910116780261986261%2522%252C%2522scm%2522%253A%252220140713.130102334…%2522%257D&request_id=161892910116780261986261&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2allfirst_rank_v2~rank_v29-1-88951259.pc_search_result_hbase_insert&utm_term=cv2.cvtColor

Image preprocessing example:

import cv2
import numpy as np
import torch


def preprocess(image_dir):

    image = cv2.imread(image_dir)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    print("pri_image shape: ", image.shape)
    image = cv2.resize(image, (320, 320))
    # normalization: scale to [0, 1] by the maximum value
    image = image / np.max(image)
    # per-channel mean normalization
    image[:, :, 0] = (image[:, :, 0] - np.mean(image[:, :, 0])) / np.mean(image[:, :, 0])
    image[:, :, 1] = (image[:, :, 1] - np.mean(image[:, :, 1])) / np.mean(image[:, :, 1])
    image[:, :, 2] = (image[:, :, 2] - np.mean(image[:, :, 2])) / np.mean(image[:, :, 2])
    image = np.transpose(image, (2, 0, 1))  # HWC => CHW
    image = np.expand_dims(image, axis=0)   # add a batch dimension at axis 0
    print("image_shape has been resized to : ", image.shape)
    image = torch.from_numpy(image)  # convert to a torch tensor (float64 here; call .float() if float32 is expected)
    return image

OpenVINO inference framework

import os
from openvino.inference_engine import IECore

ie = IECore()
xml_dir = os.path.join(os.getcwd(), 'saved_models', model_name + '.xml')  # model_name: base name of the IR files, defined elsewhere
bin_dir = os.path.join(os.getcwd(), 'saved_models', model_name + '.bin')
net = ie.read_network(xml_dir, bin_dir)
input_blob = next(iter(net.input_info))  # name of the first input
out_blob = next(iter(net.outputs))       # name of the first output
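The snippet above stops after reading the IR model; a hedged sketch of the remaining steps, loading the network onto a device and running one inference on an image prepared with the preprocess() function above (the device name "CPU" and the file name test.jpg are assumptions):

exec_net = ie.load_network(network=net, device_name="CPU")  # compile the network for the target device

image = preprocess("test.jpg").numpy()            # [1, 3, 320, 320]; must match the network's expected input shape
res = exec_net.infer(inputs={input_blob: image})  # run inference; result is a dict keyed by output name
output = res[out_blob]
print(output.shape)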