Writing a VGG-D (VGG-16) model with TensorFlow and Keras

# -*- coding: utf-8 -*-
import tensorflow as tf

keras = tf.keras
'''
vgg_network, TensorFlow (graph-style) version
'''
def vgg_network(x):
    """
    1.224x224x3的图片,input
    2.卷积 [3,3,3,64]+激活
    3.卷积 [3,3,64,64]+激活
    4.最大池化
    5.卷积 [3,3,64,128]+激活
    6.卷积 [3,3,128,128]+激活
    7.最大池化
    8.卷积 [3,3,128,256]+激活
    9.卷积 [3,3,256,256]+激活
    10.卷积 [3,3,256,256]+激活
    11.最大池化
    12.卷积 [3,3,256,512]+激活
    13.卷积 [3,3,512,512]+激活
    14.卷积 [3,3,512,512]+激活
    15.最大池化
    16.卷积 [3,3,512,512]+激活
    17.卷积 [3,3,512,512]+激活
    18.卷积 [3,3,512,512]+激活
    19.最大池化
    20.?x4096单元的全连接
    21.4096xx4096单元的全连接+激活
    22.4096xx1000单元的全连接+激活
    23.softmax
    """

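    # The variable scopes below implement the VGG-D convolution stages: two or
    # three 3x3 convolutions per stage (ReLU6 activation here), each stage
    # followed by 2x2, stride-2 max pooling, as listed in the docstring above.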
    with tf.variable_scope('layer1'):
        conv1=tf.nn.conv2d(x,filter=tf.get_variable('w1',[3,3,3,64]),strides=[1,1,1,1],padding='SAME')
        conv1=tf.nn.bias_add(conv1,tf.get_variable('b1',[64]))
        conv1=tf.nn.relu6(conv1)
        conv1=tf.nn.conv2d(conv1,filter=tf.get_variable('w2',[3,3,64,64]),strides=[1,1,1,1],padding='SAME')
        conv1=tf.nn.bias_add(conv1,tf.get_variable('b2',[64]))
        conv1=tf.nn.relu6(conv1)
    with tf.variable_scope('layer2'):
        mp_net=tf.nn.max_pool(conv1,[1,2,2,1],[1,2,2,1],padding='SAME')
    with tf.variable_scope('layer3'):
        conv2=tf.nn.conv2d(mp_net,tf.get_variable('w1',[3,3,64,128]),strides=[1,1,1,1],padding='SAME')
        conv2=tf.nn.bias_add(conv2,tf.get_variable('b1',[128]))
        conv2=tf.nn.relu6(conv2)
        conv2=tf.nn.conv2d(conv2,tf.get_variable('w2',[3,3,128,128]),strides=[1,1,1,1],padding='SAME')
        conv2=tf.nn.bias_add(conv2,tf.get_variable('b2',[128]))
        conv2=tf.nn.relu6(conv2)
    with tf.variable_scope('layer4'):
        mp_net=tf.nn.max_pool(conv2,[1,2,2,1],[1,2,2,1],padding='SAME')
    with tf.variable_scope('layer5'):
        conv3=tf.nn.conv2d(mp_net,tf.get_variable('w1',[3,3,128,256]),strides=[1,1,1,1],padding='SAME')
        conv3=tf.nn.bias_add(conv3,tf.get_variable('b1',[256]))
        conv3=tf.nn.relu6(conv3)
        conv3=tf.nn.conv2d(conv3,tf.get_variable('w2',[3,3,256,256]),strides=[1,1,1,1],padding='SAME')
        conv3=tf.nn.bias_add(conv3,tf.get_variable('b2',[256]))
        conv3=tf.nn.relu6(conv3)
        conv3 = tf.nn.conv2d(conv3, tf.get_variable('w3', [3, 3, 256, 256]), strides=[1, 1, 1, 1], padding='SAME')
        conv3 = tf.nn.bias_add(conv3, tf.get_variable('b3', [256]))
        conv3 = tf.nn.relu6(conv3)
    with tf.variable_scope('layer6'):
        mp_net=tf.nn.max_pool(conv3,[1,2,2,1],[1,2,2,1],padding='SAME')
    with tf.variable_scope('layer7'):
        conv4=tf.nn.conv2d(mp_net,tf.get_variable('w1',[3,3,256,512]),strides=[1,1,1,1],padding='SAME')
        conv4=tf.nn.bias_add(conv4,tf.get_variable('b1',[512]))
        conv4=tf.nn.relu6(conv4)
        conv4=tf.nn.conv2d(conv4,tf.get_variable('w2',[3,3,512,512]),strides=[1,1,1,1],padding='SAME')
        conv4=tf.nn.bias_add(conv4,tf.get_variable('b2',[512]))
        conv4=tf.nn.relu6(conv4)
        conv4 = tf.nn.conv2d(conv4, tf.get_variable('w3', [3, 3, 512, 512]), strides=[1, 1, 1, 1], padding='SAME')
        conv4 = tf.nn.bias_add(conv4, tf.get_variable('b3', [512]))
        conv4 = tf.nn.relu6(conv4)
    with tf.variable_scope('layer8'):
        mp_net = tf.nn.max_pool(conv4, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    with tf.variable_scope('layer9'):
        conv5 = tf.nn.conv2d(mp_net, tf.get_variable('w1', [3, 3, 512, 512]), strides=[1, 1, 1, 1], padding='SAME')
        conv5 = tf.nn.bias_add(conv5, tf.get_variable('b1', [512]))
        conv5 = tf.nn.relu6(conv5)
        conv5 = tf.nn.conv2d(conv5, tf.get_variable('w2', [3, 3, 512, 512]), strides=[1, 1, 1, 1], padding='SAME')
        conv5 = tf.nn.bias_add(conv5, tf.get_variable('b2', [512]))
        conv5 = tf.nn.relu6(conv5)
        conv5 = tf.nn.conv2d(conv5, tf.get_variable('w3', [3, 3, 512, 512]), strides=[1, 1, 1, 1], padding='SAME')
        conv5 = tf.nn.bias_add(conv5, tf.get_variable('b3', [512]))
        conv5 = tf.nn.relu6(conv5)
    with tf.variable_scope('layer10'):
        mp_net = tf.nn.max_pool(conv5,  [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
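    # After five stride-2 poolings the 224x224 input has been reduced to a
    # 7x7x512 feature map; flatten it into a vector for the fully connected layers.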
    with tf.variable_scope('fc11'):
        sha = mp_net.get_shape()
        units = int(sha[1] * sha[2] * sha[3])  # 7*7*512 = 25088 flattened features
        flat = tf.reshape(mp_net, [-1, units])
        mat = tf.matmul(flat, tf.get_variable('w', [units, 4096]))
        fc1 = tf.nn.sigmoid(tf.add(mat, tf.get_variable('b', [4096])))
    with tf.variable_scope("fc12"):
        mat = tf.matmul(fc1, tf.get_variable('w', [4096, 4096]))
        fc2 = tf.nn.sigmoid(tf.add(mat, tf.get_variable('b', [4096])))
    with tf.variable_scope("fc13"):
        mat = tf.matmul(fc2, tf.get_variable('w', [4096, 1000]))
    with tf.variable_scope('softmax14'):
        out=tf.nn.softmax(mat)
    return out
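
# A minimal sanity check for the graph version above (a sketch assuming
# TensorFlow 1.x; the helper and placeholder names are illustrative, not part
# of the original post).
def check_vgg_network_shape():
    x_in = tf.placeholder(tf.float32, [None, 224, 224, 3])  # NHWC input batch
    logits = vgg_network(x_in)                               # builds the graph once
    print(logits.get_shape())                                # expected: (?, 1000)
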
'''
vgg_network, Keras version
'''
def vgg_net():
    """
     1.224x224x3的图片,input
     2.卷积 [3,3,3,64]+激活
     3.卷积 [3,3,64,64]+激活
     4.最大池化
     5.卷积 [3,3,64,128]+激活
     6.卷积 [3,3,128,128]+激活
     7.最大池化
     8.卷积 [3,3,128,256]+激活
     9.卷积 [3,3,256,256]+激活
     10.卷积 [3,3,256,256]+激活
     11.最大池化
     12.卷积 [3,3,256,512]+激活
     13.卷积 [3,3,512,512]+激活
     14.卷积 [3,3,512,512]+激活
     15.最大池化
     16.卷积 [3,3,512,512]+激活
     17.卷积 [3,3,512,512]+激活
     18.卷积 [3,3,512,512]+激活
     19.最大池化
     20.?x4096单元的全连接
     21.4096xx4096单元的全连接+激活
     22.4096xx1000单元的全连接+激活
     23.softmax
     """

    inputs=keras.layers.Input((224,224,3))
    conv1=keras.layers.Conv2D(64,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(inputs)
    conv2=keras.layers.Conv2D(64,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv1)
    max_pool_1=keras.layers.MaxPool2D([2,2],[2,2],padding='same')(conv2)  # stride 2 halves the spatial size, matching the graph version
    #2
    conv3=keras.layers.Conv2D(128,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(max_pool_1)
    conv4=keras.layers.Conv2D(128,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv3)
    max_pool_2=keras.layers.MaxPool2D([2,2],[2,2],padding='same')(conv4)
    #3
    conv=keras.layers.Conv2D(256,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(max_pool_2)
    conv=keras.layers.Conv2D(256,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv)
    conv=keras.layers.Conv2D(256,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv)
    max_pool=keras.layers.MaxPool2D([2,2],[2,2],padding='same')(conv)
    #4
    conv=keras.layers.Conv2D(512,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(max_pool)
    conv=keras.layers.Conv2D(512,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv)
    conv=keras.layers.Conv2D(512,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv)
    max_pool=keras.layers.MaxPool2D([2,2],[2,2],padding='same')(conv)
    #5
    conv=keras.layers.Conv2D(512,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(max_pool)
    conv=keras.layers.Conv2D(512,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv)
    conv=keras.layers.Conv2D(512,[3,3],strides=[1,1],padding='same',activation=keras.activations.relu)(conv)
    max_pool=keras.layers.MaxPool2D([2,2],[2,2],padding='same')(conv)
    #6
    fc=keras.layers.Flatten()(max_pool)
    fc=keras.layers.Dense(4096,activation=tf.keras.activations.sigmoid)(fc)
    #7
    fc=keras.layers.Dense(4096,activation=tf.keras.activations.sigmoid)(fc)
    #8
    outputs=keras.layers.Dense(1000,activation=tf.keras.activations.softmax)(fc)
    model=keras.Model(inputs=inputs,outputs=outputs)
    return model
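
# A minimal usage sketch for the Keras version; the optimizer and loss below
# are illustrative assumptions, not specified in the original post.
def inspect_vgg_net():
    model = vgg_net()
    model.summary()  # prints the layer-by-layer structure and parameter counts
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])  # after this, model.fit can train on labeled data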

def test():
    # read and preprocess a test image
    data = tf.read_file(r'd://草莓.jpg')
    img = tf.image.decode_jpeg(data, channels=3)
    img1 = tf.image.resize_images(img, [224, 224], method=1)
    img1 = tf.cast(tf.expand_dims(img1, 0), dtype='float32')
    model = vgg_net()
    output = model(img1)         # Keras version
    output2 = vgg_network(img1)  # graph-style version

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print("Keras model output:", sess.run(output))
        print("TensorFlow model output:", sess.run(output2))

if __name__ == '__main__':
    test()
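
# Note (an assumption about the runtime, not stated in the original post): the
# graph-style code above uses TensorFlow 1.x APIs (tf.variable_scope,
# tf.Session, tf.read_file, tf.global_variables_initializer). On TensorFlow 2.x
# it only runs through the compatibility layer, e.g. by replacing the import with:
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()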