Hand-Writing a U-Net in TensorFlow

The U-Net Network

[Figure: U-Net architecture flow chart]

I read several U-Net write-ups online, with plenty of programs and code; some were written in Keras. But because I had not dug into the details behind them, I could not get the results I wanted after swapping in my own data, and since I had not really understood the network, I did not know why it behaved that way. So I sat down and carefully studied the paper and its flow chart.

In the code below I made one change to the network: in the U-Net paper, the final layer is a convolution with two output channels; after that layer I added one more convolution with a single output channel.
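A minimal sketch of this modified output head (the variable names here are illustrative; the full network below does the same thing):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 512, 512, 32])  # features from the last decoder block
# the paper's final layer: 3x3 conv down to 2 feature channels
w_out2 = tf.Variable(tf.truncated_normal([3, 3, 32, 2], 0, 0.01))
feat2 = tf.nn.relu(tf.nn.conv2d(x, w_out2, [1, 1, 1, 1], 'SAME'))
# my extra layer: 1x1 conv from 2 channels down to 1, used as the logit map
w_out1 = tf.Variable(tf.truncated_normal([1, 1, 2, 1], 0, 0.01))
logits = tf.nn.conv2d(feat2, w_out1, [1, 1, 1, 1], 'SAME')
mask = tf.nn.sigmoid(logits)  # per-pixel foreground probability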

Walking Through the Flow Chart
As the figure above shows, the input is a 572×572×1 grayscale image, and the first convolution turns it into a 570×570 feature map, because the original paper does not use 'SAME' padding here. To make the output image the same size as the input image, I changed the padding at this point to 'SAME'.
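The size difference is easy to check; a quick sketch in TensorFlow 1.x (matching the style of the code below):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 572, 572, 1])
w = tf.truncated_normal([3, 3, 1, 64], 0, 0.01)
print(tf.nn.conv2d(x, w, [1, 1, 1, 1], 'VALID').shape)  # (?, 570, 570, 64) -- the paper's choice
print(tf.nn.conv2d(x, w, [1, 1, 1, 1], 'SAME').shape)   # (?, 572, 572, 64) -- what I use instead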

My Modification
My image data is 512×512, so I keep a 512×512 input and simply follow the flow chart: two convolutions, one pooling, two convolutions, one pooling, and so on.
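With 'SAME' padding only the poolings change the spatial size, so along the contracting path each block just halves the feature map: 512 → 256 → 128 → 64 → 32. A quick check:

size = 512
for level in range(1, 5):
    # two 3x3 'SAME' convs keep the size; the 2x2 max-pool halves it
    size //= 2
    print('after pool %d: %dx%d' % (level, size, size))
# after pool 1: 256x256 ... after pool 4: 32x32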

That covers the overall flow, so let's get straight to the code!

import tensorflow as tf
import numpy as np
import os
import glob
import cv2 as cv

batch_size=2

# Load training images/labels and test images as grayscale, scale to [0, 1],
# and stack them into (N, 512, 512, 1) float32 arrays. Expected layout:
# data/train/image, data/train/label (*.tif masks) and data/test.
def data_load(paths):
    dir_train=os.path.join(paths,'train')
    dir_test=os.path.join(paths,'test')
    test=sorted(glob.glob(dir_test+'/*'))  # sort so saved predictions match file order
    train_imgs=os.path.join(dir_train,'image')  # data/train/image
    lables_imgs=os.path.join(dir_train,'label') # data/train/label
    imgs=glob.glob(train_imgs+'/*') # data/train/image/*
    lables=glob.glob(lables_imgs+'/*.tif')
    imgs=sorted(imgs)
    lables=sorted(lables)
    image=[]
    lable=[]
    tests=[]
    for path in imgs:
        # print(path)
        img=cv.imread(path,0)
        img=img/255.0
        image.append(img)
    for path in lables:
        # print(path)
        img=cv.imread(path,0)
        img=img/255.0
        lable.append(img)
    for path in test:
        # print(path)
        img=cv.imread(path,0)
        img=img/255.0
        tests.append(img)
    image=np.array(image,np.float32).reshape(-1,512,512,1)
    lable=np.array(lable,np.float32).reshape(-1,512,512,1)
    tests=np.array(tests,np.float32).reshape(-1,512,512,1)
    return image,lable,tests

def saves(imgs):
    # threshold the sigmoid output at 0.5 and save each mask as an 8-bit image
    k=0
    for img in imgs:
        img[img>0.5]=1
        img[img<=0.5]=0
        img=img*255.0
        cv.imwrite('test/%d.png'%(k),img)
        k+=1

X,Y,test=data_load('data')
print(X.shape)
print(Y.shape)
print(test.shape)

data=tf.placeholder(tf.float32,[None,512,512,1])
lables=tf.placeholder(tf.float32,[None,512,512,1])
# dropout keep probability: fed as 0.5 during training and 1.0 at test time
keep_prob=tf.placeholder(tf.float32)

def weight(shape):
    w=tf.truncated_normal(shape,0,0.01)
    return tf.Variable(w)
def biases(shape):
    b=tf.zeros(shape)
    return tf.Variable(b)

# ---- Contracting path: each block is two 3x3 'SAME' convs followed by a 2x2 max-pool ----
w1=weight([3,3,1,32])
b1=biases([32])
conv1=tf.nn.relu(tf.nn.conv2d(data,w1,[1,1,1,1],'SAME')+b1)

w2=weight([3,3,32,32])
b2=biases([32])
conv2=tf.nn.relu(tf.nn.conv2d(conv1,w2,[1,1,1,1],'SAME')+b2)

# pool after the second conv, so the skip connection (conv2) carries both convs
pool1=tf.nn.max_pool(conv2,[1,2,2,1],[1,2,2,1],'SAME')



w3=weight([3,3,32,64])
b3=biases([64])
conv3=tf.nn.relu(tf.nn.conv2d(pool1,w3,[1,1,1,1],'SAME')+b3)

w4=weight([3,3,64,64])
b4=biases([64])
conv4=tf.nn.relu(tf.nn.conv2d(conv3,w4,[1,1,1,1],'SAME')+b4)
conv4=tf.nn.dropout(conv4,keep_prob)


pool2=tf.nn.max_pool(conv4,[1,2,2,1],[1,2,2,1],'SAME')



w5=weight([3,3,64,128])
b5=biases([128])
conv5=tf.nn.relu(tf.nn.conv2d(pool2,w5,[1,1,1,1],'SAME')+b5)

w6=weight([3,3,128,128])
b6=biases([128])
conv6=tf.nn.relu(tf.nn.conv2d(conv5,w6,[1,1,1,1],'SAME')+b6)
conv6=tf.nn.dropout(conv6,keep_prob)


pool3=tf.nn.max_pool(conv6,[1,2,2,1],[1,2,2,1],'SAME')

w7=weight([3,3,128,256])
b7=biases([256])
conv7=tf.nn.relu(tf.nn.conv2d(pool3,w7,[1,1,1,1],'SAME')+b7)

w8=weight([3,3,256,256])
b8=biases([256])
conv8=tf.nn.relu(tf.nn.conv2d(conv7,w8,[1,1,1,1],'SAME')+b8)
conv8=tf.nn.dropout(conv8,keep_prob)


pool4=tf.nn.max_pool(conv8,[1,2,2,1],[1,2,2,1],'SAME')

w9=weight([3,3,256,512])
b9=biases([512])
conv9=tf.nn.relu(tf.nn.conv2d(pool4,w9,[1,1,1,1],'SAME')+b9)

w10=weight([3,3,512,512])
b10=biases([512])
conv10=tf.nn.relu(tf.nn.conv2d(conv9,w10,[1,1,1,1],'SAME')+b10)
conv10=tf.nn.dropout(conv10,keep_prob)


# ---- Expanding path: the conv2d_transpose filter shape is
# [height, width, output_channels, input_channels] (the reverse of conv2d), and
# output_shape comes from the matching encoder layer so the skip connection
# can be concatenated along the channel axis.
w11=weight([3,3,256,512])
conv11=tf.nn.conv2d_transpose(conv10,w11,tf.shape(conv8),[1,2,2,1],'SAME')
print(conv11.shape)
mer1=tf.concat([conv11,conv8],axis=3)
print(mer1.shape)

w12=weight([3,3,512,256])
b12=biases([256])
conv12=tf.nn.relu(tf.nn.conv2d(mer1,w12,[1,1,1,1],'SAME')+b12)


w13=weight([3,3,256,256])
b13=biases([256])
conv13=tf.nn.relu(tf.nn.conv2d(conv12,w13,[1,1,1,1],'SAME')+b13)
conv13=tf.nn.dropout(conv13,keep_prob)


w14=weight([3,3,128,256])
conv14=tf.nn.conv2d_transpose(conv13,w14,tf.shape(conv6),[1,2,2,1],'SAME')

mer2=tf.concat([conv14,conv6],axis=3)
print(mer2.shape)

w15=weight([3,3,256,128])
b15=biases([128])
conv15=tf.nn.relu(tf.nn.conv2d(mer2,w15,[1,1,1,1],'SAME')+b15)

w16=weight([3,3,128,128])
b16=biases([128])
conv16=tf.nn.relu(tf.nn.conv2d(conv15,w16,[1,1,1,1],'SAME')+b16)
conv16=tf.nn.dropout(conv16,keep_prob)


w17=weight([3,3,64,128])
conv17=tf.nn.conv2d_transpose(conv16,w17,tf.shape(conv4),[1,2,2,1],'SAME')

mer3=tf.concat([conv17,conv4],axis=3)

w18=weight([3,3,128,64])
b18=biases([64])
conv18=tf.nn.relu(tf.nn.conv2d(mer3,w18,[1,1,1,1],'SAME')+b18)

w19=weight([3,3,64,64])
b19=biases([64])
conv19=tf.nn.relu(tf.nn.conv2d(conv18,w19,[1,1,1,1],'SAME')+b19)
conv19=tf.nn.dropout(conv19,keep_prob)


w20=weight([3,3,32,64])
conv20=tf.nn.conv2d_transpose(conv19,w20,tf.shape(conv2),[1,2,2,1],'SAME')

mer4=tf.concat([conv20,conv2],axis=3)

print(mer4.shape)

w21=weight([3,3,64,32])
b21=biases([32])
conv21=tf.nn.relu(tf.nn.conv2d(mer4,w21,[1,1,1,1],'SAME')+b21)

w22=weight([3,3,32,32])
b22=biases([32])
conv22=tf.nn.relu(tf.nn.conv2d(conv21,w22,[1,1,1,1],'SAME')+b22)
conv22=tf.nn.dropout(conv22,keep_prob)


# Output head: 3x3 conv down to 2 channels (as in the paper), then my extra
# 1x1 conv down to a single logit channel.
w23=weight([3,3,32,2])
b23=biases([2])
conv23=tf.nn.relu(tf.nn.conv2d(conv22,w23,[1,1,1,1],'SAME')+b23)

w24=weight([1,1,2,1])
b24=biases([1])
conv24=tf.nn.conv2d(conv23,w24,[1,1,1,1],'SAME')+b24  # raw logits; the sigmoid is applied below / inside the loss

y_=tf.nn.sigmoid(conv24)
# pixel-wise binary cross-entropy on the logits, averaged so the loss is a scalar
loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=lables,logits=conv24))
train=tf.train.AdamOptimizer(1e-4).minimize(loss)

init=tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(50):
        # reshuffle the training pairs each epoch
        l=np.random.permutation(len(X))
        x=X[l,:,:,:]
        y=Y[l,:,:,:]
        for j in range(len(x)//batch_size):
            train_x=x[j*batch_size:(j+1)*batch_size,:,:,:]
            lable_x=y[j*batch_size:(j+1)*batch_size,:,:,:]
            _,lo=sess.run([train,loss],feed_dict={data:train_x,lables:lable_x,keep_prob:0.5})
        if i%5==0:
            print("epoch %d, loss: %f"%(i,lo))
    # run prediction with dropout disabled
    y_=sess.run(y_,feed_dict={data:test,keep_prob:1.0})
    saves(y_)
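If the data directory matches the layout above, running the script trains for 50 epochs (shuffling each epoch, batch size 2) and then writes the thresholded test predictions to test/0.png, test/1.png, and so on. One caveat: cv.imwrite will not create the test/ output directory, so make sure it exists before running.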