Python neural network training loss is NaN: loss value goes to 0 when training a neural network

import tensorflow as tf
import numpy as np
import requests
from sklearn.model_selection import train_test_split

BATCH_SIZE = 20

# Get data
birthdata_url = 'http://springer.bme.gatech.edu/Ch17.Logistic/Logisticdat/lowbwt.dat'
birth_file = requests.get(birthdata_url)
birth_data = birth_file.text.split('\r\n')[5:]
birth_data = np.array([[x for x in y.split(' ') if len(x) >= 1]
                       for y in birth_data[1:] if len(y) >= 1])

# Get x and y vals
y_vals = np.array([x[1] for x in birth_data]).reshape((-1, 1))
x_vals = np.array([x[2:10] for x in birth_data])

# Split data
x_train, x_test, y_train, y_test = train_test_split(x_vals, y_vals, test_size=0.3)

# Placeholders
x_data = tf.placeholder(dtype=tf.float32, shape=[None, 8])
y_data = tf.placeholder(dtype=tf.float32, shape=[None, 1])

# Define our Neural Network
def init_weight(shape):
    return tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1))

def init_bias(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def fully_connected(inp_layer, weights, biases):
    return tf.nn.relu(tf.matmul(inp_layer, weights) + biases)

def nn(x):
    w1 = init_weight([8, 25])
    b1 = init_bias([25])
    layer1 = fully_connected(x, w1, b1)

    w2 = init_weight([25, 10])
    b2 = init_bias([10])
    layer2 = fully_connected(layer1, w2, b2)

    w3 = init_weight([10, 3])
    b3 = init_bias([3])
    layer3 = fully_connected(layer2, w3, b3)

    w4 = init_weight([3, 1])
    b4 = init_bias([1])
    final_output = fully_connected(layer3, w4, b4)

    return final_output

# Predicted values.
y_ = nn(x_data)

# Loss and training step.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_data, logits=y_))
train_step = tf.train.AdamOptimizer(0.1).minimize(loss)

# Initialize session and global variables
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Accuracy
def get_accuracy(logits, labels):
    batch_predictions = np.argmax(logits, axis=1)
    num_correct = np.sum(np.equal(batch_predictions, labels))
    return (100 * num_correct / batch_predictions.shape[0])

loss_vec = []
for i in range(500):
    # Get random indexes and create batches.
    rand_index = np.random.choice(len(x_train), size=BATCH_SIZE)

    # x and y batch.
    rand_x = x_train[rand_index]
    rand_y = y_train[rand_index]

    # Run the training step.
    sess.run(train_step, feed_dict={x_data: rand_x, y_data: rand_y})

    # Get the current loss.
    temp_loss = sess.run(loss, feed_dict={x_data: x_test, y_data: y_test})
    loss_vec.append(temp_loss)

    if (i + 1) % 20 == 0:
        print("Current Step is: {}, Loss: {}".format((i + 1), temp_loss))
        # print("-----Test Accuracy: {}-----".format(get_accuracy(logits=sess.run(y_, feed_dict={x_data: x_test}), labels=y_test)))
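A likely cause for the loss printing as 0: tf.nn.softmax_cross_entropy_with_logits applies softmax across each example's logits, and the network above ends in a single output unit, so the softmax of that lone logit is always 1 and the cross-entropy is identically 0 no matter what the network computes. The ReLU on the output layer and the fairly large 0.1 Adam learning rate can additionally push the training toward NaN. Below is a minimal, self-contained sketch of the difference, using made-up logits and labels (not the lowbwt data) and assuming the same TensorFlow 1.x API as the question:

import tensorflow as tf  # assumes TensorFlow 1.x, as in the question

# Hypothetical single-unit logits and binary labels, shape (batch, 1).
logits = tf.constant([[2.0], [-1.0]])
labels = tf.constant([[1.0], [0.0]])

# Softmax over a single logit is always 1, so this loss is always 0.
softmax_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)

# Sigmoid cross-entropy treats the single logit as a binary score and is non-zero.
sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)

with tf.Session() as sess:
    print(sess.run(softmax_loss))   # [0. 0.]
    print(sess.run(sigmoid_loss))   # roughly [0.127 0.313]

Applied to the code above, that would mean leaving the last layer linear (tf.matmul(layer3, w4) + b4, with no ReLU), switching the loss to tf.nn.sigmoid_cross_entropy_with_logits, and trying a smaller Adam learning rate such as 0.001; alternatively, widen the output to two units with one-hot labels if softmax is preferred.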
