Python Implementation and Application of the AlexNet Model

AlexNet Model Implementation Flow
The model is built from five convolutional layers and three fully connected layers. Local response normalization (LRN) is applied after the first and second convolutional layers, max pooling after the first, second, and fifth convolutional layers, and dropout after the first two fully connected layers to reduce overfitting. The table below lists each layer's parameter shapes, the sample dimensions after each stage, and the parameter counts. Note that in the original paper the second, fourth, and fifth convolutional layers are split across two GPUs for training; in that case the channel value, i.e. the third dimension of W (96 in conv2, 384 in conv4 and conv5), must be divided by 2.
Parameter information table

| Layer | W | b | strides | # of parameters | Output size |
| --- | --- | --- | --- | --- | --- |
| conv0 (input) | - | - | - | - | 227*227*3 |
| conv1 | 11*11*3*96 | 96 | 4*4 | (11*11*3+1)*96 | 55*55*96 |
| LRN1 | - | - | - | - | 55*55*96 |
| max_pool1 | 3*3 | - | 2*2 | 0 | 27*27*96 |
| conv2 | 5*5*96*256 | 256 | 1*1 | (5*5*96+1)*256 | 27*27*256 |
| LRN2 | - | - | - | - | 27*27*256 |
| max_pool2 | 3*3 | - | 2*2 | 0 | 13*13*256 |
| conv3 | 3*3*256*384 | 384 | 1*1 | (3*3*256+1)*384 | 13*13*384 |
| conv4 | 3*3*384*384 | 384 | 1*1 | (3*3*384+1)*384 | 13*13*384 |
| conv5 | 3*3*384*256 | 256 | 1*1 | (3*3*384+1)*256 | 13*13*256 |
| max_pool3 | 3*3 | - | 2*2 | 0 | 6*6*256 |
| fcLayer1 | (6*6*256, 4096) | 4096 | - | (6*6*256+1)*4096 | (-1, 4096) |
| fcLayer2 | (4096, 4096) | 4096 | - | (4096+1)*4096 | (-1, 4096) |
| fcLayer3 | (4096, 10) | 10 | - | (4096+1)*10 | (-1, 10) |
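As a quick sanity check, the parameter counts in the table can be recomputed in a few lines of Python (a small sketch, not part of the original post; the shapes are copied from the table above):

# Each conv layer has (kh*kw*c_in + 1)*c_out parameters (the +1 is the bias),
# and each fully connected layer has (n_in + 1)*n_out.
conv_shapes = {
    'conv1': (11, 11, 3, 96),
    'conv2': (5, 5, 96, 256),
    'conv3': (3, 3, 256, 384),
    'conv4': (3, 3, 384, 384),
    'conv5': (3, 3, 384, 256),
}
fc_shapes = {
    'fcLayer1': (6*6*256, 4096),
    'fcLayer2': (4096, 4096),
    'fcLayer3': (4096, 10),
}
total = 0
for name, (kh, kw, c_in, c_out) in conv_shapes.items():
    n = (kh*kw*c_in + 1)*c_out
    total += n
    print(name, n)   # e.g. conv1 -> 34944
for name, (n_in, n_out) in fc_shapes.items():
    n = (n_in + 1)*n_out
    total += n
    print(name, n)
print('total parameters:', total)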
Python Implementation of the AlexNet Model
1. Define the convolution-layer function
The function defined here does not split the parameters into two GPU groups; a sketch of the split (two-group) version is given right after this list.
2. Define the LRN function (performs local response normalization)
3. Define the max-pooling function
4. Define the dropout function (optional; it is only a thin wrapper around tf.nn.dropout, so it makes little difference)
5. Define the fully connected layer
All five functions appear in the complete program below.
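For reference, here is a minimal sketch of the two-group convolution mentioned in step 1, in the style of AlexNet's dual-GPU split. The helper name grouped_conv and its signature are illustrative assumptions, not part of the original code; it splits the input channels and the kernels into two groups, convolves each pair separately, and concatenates the results:

import tensorflow as tf

def grouped_conv(x, W, b, stride_x, stride_y, padding='SAME'):
    # Hypothetical helper, not from the original post.
    # W has shape [kh, kw, c_in/2, c_out]: its input-channel dimension is
    # already halved, matching the "divide the third dimension by 2" note
    # in the parameter table.
    x_lo, x_hi = tf.split(x, num_or_size_splits=2, axis=3)  # split input channels
    W_lo, W_hi = tf.split(W, num_or_size_splits=2, axis=3)  # split output channels
    conv_lo = tf.nn.conv2d(x_lo, W_lo, strides=[1, stride_x, stride_y, 1], padding=padding)
    conv_hi = tf.nn.conv2d(x_hi, W_hi, strides=[1, stride_x, stride_y, 1], padding=padding)
    out = tf.concat([conv_lo, conv_hi], axis=3)             # stitch the halves back together
    return tf.nn.relu(tf.add(out, b))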

Training the AlexNet Model on 1000 Taobao Product-Classification Images
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import random

def convinit(w,h,channel,featurenum):
    W = tf.Variable(tf.truncated_normal([w,h,channel,featurenum],stddev = 0.01)) # first create the W and b variables
    b = tf.Variable(tf.constant(0.01,shape = [featurenum]))
    return W,b
def fcinit(inputD,outputD):
    W = tf.Variable(tf.truncated_normal([inputD,outputD],stddev =0.01),dtype = tf.float32)
    b = tf.Variable(tf.constant(0.01,shape = [outputD]),dtype = tf.float32)
    return W,b
def convLayer(x,W,b,stride_x,stride_y,Flagure,padding = 'SAME'):
    conv = tf.nn.conv2d(x,W,strides = [1,stride_x,stride_y,1],padding = padding) # perform the convolution
    out = tf.add(conv,b)
    if Flagure:
        return tf.nn.relu(out)
    else:
        return out # skip ReLU on the final (output) layer
def LRN(x,alpha,beta,R,bias):
    y = tf.nn.local_response_normalization(x,depth_radius = R,alpha = alpha,beta = beta,bias = bias)
    return y 
def max_poolLayer(x,w,h,stride_x,stride_y,padding = 'SAME'):
    y = tf.nn.max_pool(x,ksize = [1,w,h,1],strides = [1,stride_x,stride_y,1],padding = padding)
    return y
def dropout(x,keeppro):
    y = tf.nn.dropout(x,keeppro)
    return y
def fcLayer(x,W,b,Flagure):
    out = tf.add(tf.matmul(x,W),b)
    if Flagure:
        return tf.nn.relu(out)
    else:
        return out
def model(x,keeppro):
    #conv1
    W1,b1 = convinit(10,10,3,64)
    conv1 = convLayer(x,W1,b1,4,4,True,'VALID')
    LRN1 = LRN(conv1,2e-05,0.75,2,1)
    maxpool1 = max_poolLayer(LRN1,3,3,2,2,'VALID')
    #conv2
    W2,b2 = convinit(5,5,64,96)
    conv2 = convLayer(maxpool1,W2,b2,2,2,True,'VALID')
    LRN2 = LRN(conv2,2e-05,0.75,2,1)
    maxpool2 = max_poolLayer(LRN2,3,3,2,2,'VALID')
    #conv3
    W3,b3 = convinit(3,3,96,128)
    conv3 = convLayer(maxpool2,W3,b3,1,1,True,'SAME')
    #conv4
    W4,b4 = convinit(3,3,128,256)
    conv4 = convLayer(conv3,W4,b4,1,1,True,'SAME')
    #conv5
    W5,b5 = convinit(3,3,256,256)
    conv5 = convLayer(conv4,W5,b5,1,1,True,'SAME')
    maxpool5 = max_poolLayer(conv5,2,2,2,2,'SAME')
    #fclayer1
    fcIn = tf.reshape(maxpool5,[-1,4*4*256])
    W_1,b_1 = fcinit(4*4*256,512)
    fcout1 = fcLayer(fcIn,W_1,b_1,True)
    dropout1 = dropout(fcout1,keeppro)
    #fclayer2
    W_2,b_2 = fcinit(512,256)
    fcout2 = fcLayer(dropout1,W_2,b_2,True)
    dropout2 = dropout(fcout2,keeppro)
    #fclayer3
    W_3,b_3 = fcinit(256,10)
    fcout3 = fcLayer(dropout2,W_3,b_3,False)
    out = tf.nn.softmax(fcout3) # class probabilities; applying dropout after softmax would distort them, so it is not applied here
    return out
def accuracy(x,y):
    # helper that evaluates the model on the test set; relies on the globals defined below
    global out
    predict = sess.run(out,feed_dict = {x:test_x,keeppro:1.0}) # disable dropout at test time
    correct_predict = tf.equal(tf.argmax(predict,1),tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_predict,tf.float32))
    result = sess.run(accuracy,feed_dict = {x:test_x,y:test_y,keeppro:1.0})
    return predict,result

#make data
#read file
file = 'D:\\CNN paper\\Alex_net\\image1000test200\\train.txt'
os.chdir('D:\\CNN paper\\Alex_net\\image1000test200\\train')
with open(file,'rb') as f:
    dirdata = []
    for line in f.readlines():
        lines = bytes.decode(line).strip().split('\t')
        dirdata.append(lines)
dirdata = np.array(dirdata)

#read imgdata
imgdir,label_1 = zip(*dirdata)
alldata_x = []
for dirname in imgdir:
    img = cv2.imread(dirname.strip(),cv2.IMREAD_COLOR)
    imgdata = cv2.resize(img,(320,320),interpolation = cv2.INTER_LINEAR) # keyword needed: the third positional argument of cv2.resize is dst, not interpolation
    alldata_x.append(imgdata)
#random shuffle
alldata = zip(alldata_x,label_1)
temp = list(alldata)
random.shuffle(temp)
data_xs,data_label = zip(*temp)
data_x = np.array(data_xs)
label = [int(i) for i in data_label]
#label one hot
tf_label_onehot = tf.one_hot(label,10)
with tf.Session() as sess:
    data_y = sess.run(tf_label_onehot)
#split into training and test sets
train_x = data_x[:500]
train_y = data_y[:500]
test_x = data_x[500:800]
test_y = data_y[500:800]

x = tf.placeholder(tf.float32,[None,320,320,3])
y = tf.placeholder(tf.float32,[None,10])
keeppro = tf.placeholder(tf.float32)
out = model(x,keeppro)
out = tf.clip_by_value(out,1e-10,1.0) # avoid log(0) in the cross-entropy
loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(out),axis = 1))
Optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(100):
        sess.run(Optimizer,feed_dict = {x:train_x,y:train_y,keeppro:0.5})
        if i%10 == 0:
            cost = sess.run(loss,feed_dict = {x:train_x,y:train_y,keeppro:0.5})
            print('after %d iterations, cost is %f'%(i,cost))
            predict = sess.run(out,feed_dict = {x:test_x,keeppro:1.0}) # disable dropout when evaluating
            correct_predict = tf.equal(tf.argmax(predict,1),tf.argmax(y,1))
            accuracy = tf.reduce_mean(tf.cast(correct_predict,tf.float32)) # note: building these ops inside the loop grows the graph; acceptable for a short demo
            result = sess.run(accuracy,feed_dict = {x:test_x,y:test_y,keeppro:1.0})
            print('after %d iterations, accuracy is %f'%(i,result))
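The loop above feeds all 500 training images at every step, which works for this small dataset but is memory-hungry. A possible variant (not in the original post; batch_size = 50 is an illustrative value) is to train on a random mini-batch per step:

batch_size = 50
with tf.Session() as sess:
    sess.run(init)
    for i in range(100):
        # sample a random mini-batch instead of feeding the whole training set
        idx = np.random.choice(len(train_x), batch_size, replace=False)
        sess.run(Optimizer, feed_dict={x: train_x[idx], y: train_y[idx], keeppro: 0.5})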


