#TensorFlow# TypeError: set_shape() takes 1 positional argument but 2 were given

I want to use TensorFlow to train my own CNN. The input data is a 175*175*1 image, but I get the TypeError above when the placeholder is used in train.py.
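
For reference, the placeholder definition on its own is normally fine under TensorFlow 1.x; here is a minimal sketch of just the input setup (same 1x175x175x1 shape as in my code):

import tensorflow as tf

# Minimal sketch, assuming TensorFlow 1.x: a 4-D placeholder for one 175x175x1 image.
x = tf.placeholder(tf.float32, shape=[1, 175, 175, 1], name='input_x')
print(x.shape)  # (1, 175, 175, 1)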

>inference.py

import tensorflow as tf
import numpy as np

INPUT_NODE=175
OUTPUT_NODE=175
LAYER_1_NODE=86

IMAGE_HEIGHT=175
IMAGE_WEIGHT=175
NUM_INPUT_CHANEL=1
NUM_LAYER=11
NUM_OUTPUT_CHANEL=1
OUTPUT_CHANEL=3
OUTPUT_SIZE=175
CONV1_SIZE=5
CONV1_DEEP=16
CONV2_SIZE=8
CONV2_DEEP=32
CONV3_SIZE=8
CONV3_DEEP=64
CONV4_SIZE=5
CONV4_DEEP=128
CONV5_SIZE=7
CONV5_DEEP=128

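# Shape plan implied by the constants above: five VALID-padded conv layers shrink
# the 175x175 input (175 -> 86 -> 40 -> 17 -> 7 -> 1), and four transposed conv
# layers mirror them on the way back up.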
def inference(input_tensor, train, regularizer):
    with tf.variable_scope('layer1_Conve'):
        conv1_weight=tf.get_variable("weight",[CONV1_SIZE,CONV1_SIZE,NUM_INPUT_CHANEL,CONV1_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(conv1_weight))
        conv1_bias=tf.get_variable("bias",[CONV1_DEEP],initializer=tf.constant_initializer(0.0))
        conv1=tf.nn.conv2d(input_tensor,conv1_weight,strides=[1,2,2,1],padding="VALID")
        elu1=tf.nn.elu(tf.nn.bias_add(conv1,conv1_bias))
        # dropout only during training; the else branch keeps the ELU output
        conv1 = tf.nn.dropout(elu1, 0.5) if train else elu1
    with tf.variable_scope('layer2_Conve'):
        conv2_weight=tf.get_variable("weight",[CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(conv2_weight))
        conv2_bias=tf.get_variable("bias",[CONV2_DEEP],initializer=tf.constant_initializer(0.0))
        conv2=tf.nn.conv2d(conv1,conv2_weight,strides=[1,2,2,1],padding="VALID")
        elu2=tf.nn.elu(tf.nn.bias_add(conv2,conv2_bias))
        conv2 = tf.nn.dropout(elu2, 0.5) if train else elu2

    with tf.variable_scope('layer3_Conve'):
        conv3_weight=tf.get_variable("weight",[CONV3_SIZE,CONV3_SIZE,CONV2_DEEP,CONV3_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(conv3_weight))
        conv3_bias=tf.get_variable("bias",[CONV3_DEEP],initializer=tf.constant_initializer(0.0))
        conv3=tf.nn.conv2d(conv2,conv3_weight,strides=[1,2,2,1],padding="VALID")
        elu3=tf.nn.elu(tf.nn.bias_add(conv3,conv3_bias))
        conv3 = tf.nn.dropout(elu3, 0.5) if train else elu3

    with tf.variable_scope('layer4_Conve'):
        conv4_weight=tf.get_variable("weight",[CONV4_SIZE,CONV4_SIZE,CONV3_DEEP,CONV4_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(conv4_weight))
        conv4_bias=tf.get_variable("bias",[CONV4_DEEP],initializer=tf.constant_initializer(0.0))
        conv4=tf.nn.conv2d(conv3,conv4_weight,strides=[1,2,2,1],padding="VALID")
        elu4=tf.nn.elu(tf.nn.bias_add(conv4,conv4_bias))
        conv4 = tf.nn.dropout(elu4, 0.5) if train else elu4
    with tf.variable_scope('layer5_Conve'):
        conv5_weight=tf.get_variable("weight",[CONV5_SIZE,CONV5_SIZE,CONV4_DEEP,CONV5_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(conv5_weight))
        conv5_bias=tf.get_variable("bias",[CONV5_DEEP],initializer=tf.constant_initializer(0.0))
        conv5=tf.nn.conv2d(conv4,conv5_weight,strides=[1,1,1,1],padding="VALID")
        elu5=tf.nn.elu(tf.nn.bias_add(conv5,conv5_bias))
        conv5 = tf.nn.dropout(elu5, 0.5) if train else elu5

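    # Each deconv block below builds output_shape by hand: spatial size
    # (in - 1) * stride + kernel, i.e. the size that undoes a VALID convolution.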
    with tf.variable_scope('layer6_deconv1'): 
        in_shape = conv5.get_shape().as_list()
        h = ((in_shape[1] - 1) * 1) + CONV5_SIZE
        w = ((in_shape[2] - 1) * 1) + CONV5_SIZE
        new_shape = [in_shape[0], h, w, CONV4_DEEP]
        outputs_shape_1 = tf.stack(new_shape)
        deconv1_weight = tf.get_variable("weight",[CONV5_SIZE,CONV5_SIZE,CONV4_DEEP,CONV5_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))  
        deconv1_bias=tf.get_variable("bias",[CONV4_DEEP],initializer=tf.constant_initializer(0.0))
        deconv1 = tf.nn.conv2d_transpose(conv5, deconv1_weight,output_shape=outputs_shape_1, strides=[1, 1, 1, 1], padding='SAME')
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(deconv1_weight))
        elu6=tf.nn.elu(tf.nn.bias_add(deconv1,deconv1_bias))

    with tf.variable_scope('layer7_deconv2'): 
        in_shape = elu6.get_shape().as_list()
        h = ((in_shape[1] - 1) * 2) + CONV4_SIZE
        w = ((in_shape[2] - 1) * 2) + CONV4_SIZE
        new_shape = [in_shape[0], h, w, CONV3_DEEP]
        outputs_shape_2 = tf.stack(new_shape)
        deconv2_weight = tf.get_variable("weight",[CONV4_SIZE,CONV4_SIZE,CONV3_DEEP,CONV4_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))  
        deconv2_bias=tf.get_variable("bias",[CONV3_DEEP],initializer=tf.constant_initializer(0.0))
        deconv2 = tf.nn.conv2d_transpose(elu6, deconv2_weight, output_shape=outputs_shape_2,strides=[1, 2, 2, 1], padding='SAME')
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(deconv2_weight))
        elu7=tf.nn.elu(tf.nn.bias_add(deconv2, deconv2_bias))

    with tf.variable_scope('layer8_deconv3'):  
        in_shape = elu7.get_shape().as_list()
        h = ((in_shape[1] - 1) * 2) + CONV3_SIZE
        w = ((in_shape[2] - 1) * 2) + CONV3_SIZE
        new_shape = [in_shape[0], h, w, CONV2_DEEP]
        outputs_shape_3 = tf.stack(new_shape)
        deconv3_weight = tf.get_variable("weight",[CONV3_SIZE,CONV3_SIZE,CONV2_DEEP,CONV3_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))  
        deconv3_bias=tf.get_variable("bias",[CONV2_DEEP],initializer=tf.constant_initializer(0.0))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(deconv3_weight))
        deconv3 = tf.nn.conv2d_transpose(elu7, deconv3_weight, output_shape=outputs_shape_3,strides=[1, 2, 2, 1], padding='SAME')
        elu8=tf.nn.elu(tf.nn.bias_add(deconv3,deconv3_bias))

    with tf.variable_scope('layer9_deconv4'):  
        in_shape = elu8.get_shape().as_list()
        h = ((in_shape[1] - 1) * 2) + CONV2_SIZE
        w = ((in_shape[2] - 1) * 2) + CONV2_SIZE
        new_shape = [in_shape[0], h, w, CONV1_DEEP]
        outputs_shape_4 = tf.stack(new_shape)
        deconv4_weight = tf.get_variable("weight",[CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))  
        deconv4_bias=tf.get_variable("bias",[CONV1_DEEP],initializer=tf.constant_initializer(0.0))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(deconv4_weight))
        deconv4 = tf.nn.conv2d_transpose(elu8, deconv4_weight,output_shape=outputs_shape_4, strides=[1, 2, 2, 1], padding='SAME') 
        elu9=tf.nn.elu(tf.nn.bias_add(deconv4,deconv4_bias))

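        # Flatten the final feature map to [batch, height*width*channels]
        # so it can feed the fully connected output layer.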
        reshape=elu9.get_shape().as_list()
        nodes=reshape[1]*reshape[2]*reshape[3]
        elu9_shaped=tf.reshape(elu9,[reshape[0],nodes])


    with tf.variable_scope('output'):
        output_weight = tf.get_variable("weight",[nodes,OUTPUT_CHANEL],
                                     initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(output_weight))
        output_bias=tf.get_variable("bias",[OUTPUT_CHANEL],initializer=tf.constant_initializer(0.0))
        logit=tf.matmul(elu9_shaped, output_weight) + output_bias
    return logit
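
The shape bookkeeping above can be double-checked with a quick standalone sanity script (not part of the model; the kernel/stride pairs come from the constants in inference.py):

# VALID-conv output size per layer: (in - kernel) // stride + 1
def valid_out(size, kernel, stride):
    return (size - kernel) // stride + 1

size = 175
for kernel, stride in [(5, 2), (8, 2), (8, 2), (5, 2), (7, 1)]:
    size = valid_out(size, kernel, stride)
    print(size)  # 86, 40, 17, 7, 1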

>train.py

import os
import numpy as np
import tensorflow as tf
import inference
import DataConverted
BATCH_SIZE = 3
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 300
MOVING_AVERAGE_DECAY = 0.99
num_examples=21
MODEL_SAVE_PATH = "model/"
MODEL_NAME = "model.ckpt"

def train():
    x = tf.placeholder(tf.float32,shape=[1,inference.IMAGE_WEIGHT,inference.IMAGE_HEIGHT, inference.NUM_INPUT_CHANEL], name='input_x')          

    y_ = tf.placeholder(tf.float32,shape=[None,inference.OUTPUT_CHANEL],name='input_y')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

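    # sparse_softmax_cross_entropy_with_logits expects integer class indices,
    # hence the tf.argmax over the (presumably one-hot) y_ labels below.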
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_, 1), logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    print(loss.shape)  # .shape is a property, not a method
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:

        tf.global_variables_initializer().run()
        traindata= DataConverted.read_datasets()
        traindata=list(traindata)
        n=len(traindata)
        for i in range(TRAINING_STEPS):
            np.random.shuffle(traindata)
            mini_batches=[traindata[k:k+BATCH_SIZE] for k in range(0,n,BATCH_SIZE)]
            for mini_batch in mini_batches:
                # renamed from (x, y): those names shadowed the placeholder x and the logits y
                for (xs, ys) in mini_batch:
                    _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})

                    if i % 100 == 0:
                        print("After %d training step(s), loss on training batch is %f." % (step, loss_value))
                        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    train()
if __name__ == '__main__':
    main()
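
One real pitfall in my original loop (renamed in the code above): reusing x and y as loop variables replaces the placeholder tensor x with a numpy array, so the feed dict can no longer be keyed on it. A minimal illustration of that failure mode, with hypothetical names:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[1])    # placeholder named x
for x in [np.zeros([1], dtype=np.float32)]:  # loop variable shadows it
    feed = {x: x}  # TypeError: unhashable type: 'numpy.ndarray'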