Training a network with Keras to regress angles; these are just personal notes.

import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, AveragePooling2D
from keras.layers import BatchNormalization, Activation
from keras.optimizers import SGD
from matplotlib import pyplot as plt
from PIL import Image
n = 10000                          # total number of labelled images
index = list(range(n))
np.random.shuffle(index)           # shuffled visiting order for the generator
fileresult = open('photo/label.txt', 'r')
array = fileresult.readlines()     # one label record per image
def data_generator(batch_size):
    '''Data generator for fit_generator: yields (images, angles) batches.'''
    k = 0  # batch counter within one pass over the data
    while True:
        x = []
        y = []
        for b in range(batch_size):
            c = b + batch_size * k
            file_path = 'photo/{}.png'.format(index[c] + 1)
            img = Image.open(file_path)
            arr = np.array(img) / 255.0          # scale pixels to [0, 1]
            x.append(arr)
            # Use array[index[c]], not array[c], so the label matches the
            # shuffled image; the angle is the record's last field.
            c_array = array[index[c]].split(" ")
            y.append(float(c_array[-1]))
        x = np.expand_dims(np.array(x), axis=3)  # (batch, H, W, 1)
        y = np.expand_dims(np.array(y), axis=1)  # (batch, 1)
        k += 1
        if k == n // batch_size:                 # wrap after one full pass
            k = 0                                # (was hardcoded 10000 // 128)
        yield x, y
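# Note: the generator assumes images are named photo/1.png .. photo/10000.png and
# that line i of photo/label.txt (0-based) holds the record for image i+1, with
# the angle as the last whitespace-separated field.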
def feature_normalize(dataset):
    '''Zero-mean, unit-variance normalisation (defined but unused below).'''
    mu = np.mean(dataset)
    sigma = np.std(dataset)
    return (dataset - mu) / sigma
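# Hypothetical usage, had zero-mean normalisation been used instead of /255 scaling:
# x_train = feature_normalize(np.array(x_train))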
x_train = []
for id in range(1, 5001):
    file_path = 'photo/{}.png'.format(id)
    img = Image.open(file_path)
    arr = np.array(img) / 255.0   # scale pixels to [0, 1]
    x_train.append(arr)

x_test = []
for id in range(5001, 10001):
    file_path = 'photo/{}.txt'.format(id)
    line = []
    fileresult = open(file_path, 'r')
    for c in fileresult.readlines():
        lc = c[1:-2].split(' ')             # strip the surrounding brackets/newline
        line.append(list(map(float, lc)))   # list(...) so the row is materialised in Python 3
    x_test.append(line)
y_train = []
fileresult = open('photo/result.txt', 'r')
for c in fileresult.readlines():
    y_train.append(int(c))
y_test = []
fileresult = open('photo/result2.txt', 'r')
for c in fileresult.readlines():
    y_test.append(int(c))
x_train = np.expand_dims(x_train, axis=3)   # (5000, 28, 28, 1)
y_train = np.expand_dims(y_train, axis=1)   # (5000, 1)
x_test = np.expand_dims(x_test, axis=3)
y_test = np.expand_dims(y_test, axis=1)
model = Sequential()
model.add(Conv2D(8, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))   # (28, 28, 1); use the data being fit
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(16, (3, 3), padding='same'))
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
model.add(AveragePooling2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1))   # single linear output: the predicted angle
sgd = SGD(lr=1e-6, decay=1e-7, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
model.summary()   # summary() prints itself; wrapping it in print() adds a stray "None"
callbacks_list = [
    keras.callbacks.ModelCheckpoint(
        filepath='best_model.{epoch:02d}-{val_loss:.2f}.h5',
        monitor='val_loss', save_best_only=True),
    keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
]
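# ReduceLROnPlateau could optionally be added here to cut the learning rate when
# val_loss stalls (a sketch, not used in the run below):
# callbacks_list.append(
#     keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5))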
BATCH_SIZE = 8
EPOCHS = 300
validation_split = 0.1
num_val = int(10000 * validation_split)   # used only by the fit_generator path below
num_train = 10000 - num_val
# Alternative training path: stream batches from disk with the generator above
# instead of holding every image in memory.
# history = model.fit_generator(data_generator(BATCH_SIZE),
#                               steps_per_epoch=max(1, num_train // BATCH_SIZE),
#                               validation_data=data_generator(BATCH_SIZE),
#                               validation_steps=max(1, num_val // BATCH_SIZE),
#                               epochs=EPOCHS,
#                               initial_epoch=0,
#                               callbacks=callbacks_list)
history = model.fit(x_train,
                    y_train,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    callbacks=callbacks_list,
                    shuffle=True,
                    validation_split=0.1,
                    verbose=1)
output = model.predict(x_test)
err = np.abs(output - y_test)   # np.abs, not map(abs, ...): len() needs an array in Python 3
print(len(err))
num = 0    # |error| < 20 degrees
num1 = 0   # |error| < 45 degrees
num2 = 0   # |error| < 10 degrees
for c in err:
    if c < 20:
        num += 1
    if c < 45:
        num1 += 1
    if c < 10:
        num2 += 1
print(num / 5000.0)
print(num1 / 5000.0)
print(num2 / 5000.0)
plt.figure(figsize=(6, 4))
plt.plot(history.history['loss'], "r--", label="Loss of training data")
plt.plot(history.history['val_loss'], "r", label="Loss of validation data")
plt.title('Model Loss')   # only loss is tracked; regression here has no accuracy metric
plt.ylabel('Loss')
plt.xlabel('Training Epoch')
plt.ylim(0)
plt.legend()
plt.show()
# model.load_weights('best_model.30-22.85.h5', by_name=False)
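To reuse the best checkpoint later, a minimal inference sketch (assuming the model object above is still built, and a checkpoint file name such as best_model.30-22.85.h5 from an earlier run):

model.load_weights('best_model.30-22.85.h5')        # checkpoint name from an earlier run
arr = np.array(Image.open('photo/1.png')) / 255.0   # any 28x28 grayscale image
arr = arr.reshape(1, 28, 28, 1)                     # add batch and channel dims
angle = model.predict(arr)[0, 0]
print('predicted angle: {:.1f}'.format(angle))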

Training parameters:

_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 28, 28, 8)         80        
_________________________________________________________________
batch_normalization_1 (Batch (None, 28, 28, 8)         32        
_________________________________________________________________
activation_1 (Activation)    (None, 28, 28, 8)         0         
_________________________________________________________________
average_pooling2d_1 (Average (None, 14, 14, 8)         0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 14, 14, 16)        1168      
_________________________________________________________________
batch_normalization_2 (Batch (None, 14, 14, 16)        64        
_________________________________________________________________
activation_2 (Activation)    (None, 14, 14, 16)        0         
_________________________________________________________________
average_pooling2d_2 (Average (None, 7, 7, 16)          0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 7, 7, 32)          4640      
_________________________________________________________________
batch_normalization_3 (Batch (None, 7, 7, 32)          128       
_________________________________________________________________
activation_3 (Activation)    (None, 7, 7, 32)          0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 7, 7, 32)          9248      
_________________________________________________________________
batch_normalization_4 (Batch (None, 7, 7, 32)          128       
_________________________________________________________________
activation_4 (Activation)    (None, 7, 7, 32)          0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 7, 7, 32)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 1568)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 1569      
=================================================================
Total params: 17,057
Trainable params: 16,881
Non-trainable params: 176
_________________________________________________________________
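One caveat the model above does not handle (the commented-out y.append((180-tt if tt>0 else -180-tt)/180) line suggests some experimentation with it): angles wrap around, so MSE on raw degrees treats 179 and -179 as far apart even though they are physically close. A common alternative, sketched here under the assumption that labels are angles in degrees, is to regress (sin, cos) with a Dense(2) head and decode with atan2; this is not what the script above does:

import numpy as np

def to_sincos(deg):
    # Encode degrees as (sin, cos) pairs so the target is continuous across +/-180.
    rad = np.deg2rad(deg)
    return np.stack([np.sin(rad), np.cos(rad)], axis=-1)

def from_sincos(sc):
    # Decode (sin, cos) predictions back to degrees in (-180, 180].
    return np.rad2deg(np.arctan2(sc[..., 0], sc[..., 1]))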

Training results:

Epoch 1/300
2019-07-05 21:36:38.061295: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally
4500/4500 [==============================] - 4s 828us/step - loss: 529.2516 - val_loss: 412.2183
Epoch 2/300
4500/4500 [==============================] - 2s 499us/step - loss: 261.9935 - val_loss: 205.1285
Epoch 3/300
4500/4500 [==============================] - 2s 522us/step - loss: 183.9583 - val_loss: 176.4640
Epoch 4/300
4500/4500 [==============================] - 2s 532us/step - loss: 157.9902 - val_loss: 165.5905
Epoch 5/300
4500/4500 [==============================] - 3s 600us/step - loss: 142.2254 - val_loss: 133.4456
Epoch 6/300
4500/4500 [==============================] - 3s 574us/step - loss: 126.0722 - val_loss: 120.4842
Epoch 7/300
4500/4500 [==============================] - 3s 561us/step - loss: 115.6117 - val_loss: 113.8038
Epoch 8/300
4500/4500 [==============================] - 2s 524us/step - loss: 103.8097 - val_loss: 110.5109
Epoch 9/300
4500/4500 [==============================] - 2s 509us/step - loss: 99.2515 - val_loss: 102.6878
Epoch 10/300
4500/4500 [==============================] - 2s 493us/step - loss: 90.2994 - val_loss: 104.8946
Epoch 11/300
4500/4500 [==============================] - 3s 561us/step - loss: 84.4916 - val_loss: 84.4557
Epoch 12/300
4500/4500 [==============================] - 3s 593us/step - loss: 80.1074 - val_loss: 98.9638
Epoch 13/300
4500/4500 [==============================] - 2s 553us/step - loss: 75.9412 - val_loss: 88.8961
Epoch 14/300
4500/4500 [==============================] - 2s 496us/step - loss: 71.1955 - val_loss: 71.7264
Epoch 15/300
4500/4500 [==============================] - 2s 508us/step - loss: 67.3599 - val_loss: 65.1363
Epoch 16/300
4500/4500 [==============================] - 3s 556us/step - loss: 63.9384 - val_loss: 74.2647
Epoch 17/300
4500/4500 [==============================] - 2s 500us/step - loss: 60.4251 - val_loss: 64.9483
Epoch 18/300
4500/4500 [==============================] - 2s 492us/step - loss: 58.4589 - val_loss: 79.7609
Epoch 19/300
4500/4500 [==============================] - 2s 495us/step - loss: 56.2028 - val_loss: 57.1882
Epoch 20/300
4500/4500 [==============================] - 2s 488us/step - loss: 54.4542 - val_loss: 54.6668
Epoch 21/300
4500/4500 [==============================] - 2s 495us/step - loss: 52.0168 - val_loss: 65.2611
Epoch 22/300
4500/4500 [==============================] - 2s 493us/step - loss: 49.6020 - val_loss: 56.3058
Epoch 23/300
4500/4500 [==============================] - 2s 492us/step - loss: 48.6968 - val_loss: 61.4040
Epoch 24/300
4500/4500 [==============================] - 2s 484us/step - loss: 47.2439 - val_loss: 51.7113
Epoch 25/300
4500/4500 [==============================] - 2s 496us/step - loss: 45.8646 - val_loss: 51.0462
Epoch 26/300
4500/4500 [==============================] - 2s 496us/step - loss: 43.7266 - val_loss: 57.8190
Epoch 27/300
4500/4500 [==============================] - 2s 493us/step - loss: 41.8024 - val_loss: 52.0391
Epoch 28/300
4500/4500 [==============================] - 2s 486us/step - loss: 41.0792 - val_loss: 49.6890
Epoch 29/300
4500/4500 [==============================] - 2s 493us/step - loss: 39.9201 - val_loss: 45.2138
Epoch 30/300
4500/4500 [==============================] - 2s 492us/step - loss: 38.7481 - val_loss: 42.9585
Epoch 31/300
4500/4500 [==============================] - 2s 494us/step - loss: 39.3088 - val_loss: 51.3753
Epoch 32/300
4500/4500 [==============================] - 2s 497us/step - loss: 36.7096 - val_loss: 45.1476
Epoch 33/300
4500/4500 [==============================] - 2s 491us/step - loss: 36.1465 - val_loss: 42.1468
Epoch 34/300
4500/4500 [==============================] - 2s 491us/step - loss: 36.1388 - val_loss: 41.8209
Epoch 35/300
4500/4500 [==============================] - 2s 502us/step - loss: 35.4413 - val_loss: 41.0889
Epoch 36/300
4500/4500 [==============================] - 2s 490us/step - loss: 34.7038 - val_loss: 41.5128
Epoch 37/300
4500/4500 [==============================] - 2s 493us/step - loss: 33.0647 - val_loss: 42.6657
Epoch 38/300
4500/4500 [==============================] - 2s 495us/step - loss: 32.4053 - val_loss: 40.4518
Epoch 39/300
4500/4500 [==============================] - 2s 487us/step - loss: 31.6384 - val_loss: 45.8528
Epoch 40/300
4500/4500 [==============================] - 2s 493us/step - loss: 31.7787 - val_loss: 38.3601
Epoch 41/300
4500/4500 [==============================] - 2s 499us/step - loss: 31.7154 - val_loss: 39.3345
Epoch 42/300
4500/4500 [==============================] - 2s 486us/step - loss: 31.1642 - val_loss: 43.8633
Epoch 43/300
4500/4500 [==============================] - 2s 495us/step - loss: 29.6413 - val_loss: 37.3922
Epoch 44/300
4500/4500 [==============================] - 2s 490us/step - loss: 29.5115 - val_loss: 41.6811
Epoch 45/300
4500/4500 [==============================] - 2s 488us/step - loss: 28.8164 - val_loss: 46.3363
Epoch 46/300
4500/4500 [==============================] - 2s 494us/step - loss: 28.7848 - val_loss: 45.4383
Epoch 47/300
4500/4500 [==============================] - 2s 496us/step - loss: 27.6349 - val_loss: 41.1156
Epoch 48/300
4500/4500 [==============================] - 2s 485us/step - loss: 26.6820 - val_loss: 36.0883
Epoch 49/300
4500/4500 [==============================] - 2s 488us/step - loss: 26.6154 - val_loss: 34.9007
Epoch 50/300
4500/4500 [==============================] - 2s 494us/step - loss: 26.0024 - val_loss: 36.3475
Epoch 51/300
4500/4500 [==============================] - 2s 490us/step - loss: 26.3216 - val_loss: 35.7313
Epoch 52/300
4500/4500 [==============================] - 2s 493us/step - loss: 26.1783 - val_loss: 35.7962
Epoch 53/300
4500/4500 [==============================] - 2s 495us/step - loss: 26.0408 - val_loss: 33.8485
Epoch 54/300
4500/4500 [==============================] - 2s 494us/step - loss: 25.8208 - val_loss: 51.6296
Epoch 55/300
4500/4500 [==============================] - 2s 498us/step - loss: 24.6244 - val_loss: 35.7905
Epoch 56/300
4500/4500 [==============================] - 2s 496us/step - loss: 24.4153 - val_loss: 36.1860
Epoch 57/300
4500/4500 [==============================] - 2s 497us/step - loss: 24.0448 - val_loss: 42.8698
Epoch 58/300
4500/4500 [==============================] - 2s 498us/step - loss: 24.4918 - val_loss: 35.1314
Epoch 59/300
4500/4500 [==============================] - 2s 495us/step - loss: 22.7746 - val_loss: 33.3021
Epoch 60/300
4500/4500 [==============================] - 2s 488us/step - loss: 23.2578 - val_loss: 34.9876
Epoch 61/300
4500/4500 [==============================] - 2s 498us/step - loss: 22.7299 - val_loss: 33.7600
Epoch 62/300
4500/4500 [==============================] - 2s 501us/step - loss: 23.5896 - val_loss: 36.9000
Epoch 63/300
4500/4500 [==============================] - 2s 491us/step - loss: 22.5004 - val_loss: 34.7175
Epoch 64/300
4500/4500 [==============================] - 2s 518us/step - loss: 22.4151 - val_loss: 31.8431
Epoch 65/300
4500/4500 [==============================] - 2s 528us/step - loss: 22.5390 - val_loss: 32.5396
Epoch 66/300
4500/4500 [==============================] - 3s 558us/step - loss: 21.3262 - val_loss: 36.8819
Epoch 67/300
4500/4500 [==============================] - 2s 505us/step - loss: 21.7721 - val_loss: 35.8961
Epoch 68/300
4500/4500 [==============================] - 2s 490us/step - loss: 21.8546 - val_loss: 32.9064
Epoch 69/300
4500/4500 [==============================] - 2s 507us/step - loss: 21.8813 - val_loss: 36.9778
Epoch 70/300
4500/4500 [==============================] - 2s 495us/step - loss: 21.4432 - val_loss: 29.8957
Epoch 71/300
4500/4500 [==============================] - 2s 492us/step - loss: 21.2658 - val_loss: 35.6055
Epoch 72/300
4500/4500 [==============================] - 2s 486us/step - loss: 20.4156 - val_loss: 31.9169
Epoch 73/300
4500/4500 [==============================] - 2s 491us/step - loss: 20.3294 - val_loss: 34.3695
Epoch 74/300
4500/4500 [==============================] - 2s 489us/step - loss: 19.9095 - val_loss: 32.4377
Epoch 75/300
4500/4500 [==============================] - 2s 490us/step - loss: 20.2715 - val_loss: 31.1379
Epoch 76/300
4500/4500 [==============================] - 2s 495us/step - loss: 19.5925 - val_loss: 30.7104
Epoch 77/300
4500/4500 [==============================] - 2s 489us/step - loss: 19.2491 - val_loss: 33.6524
Epoch 78/300
4500/4500 [==============================] - 2s 495us/step - loss: 19.8310 - val_loss: 34.4298
Epoch 79/300
4500/4500 [==============================] - 2s 534us/step - loss: 19.2930 - val_loss: 35.9891
Epoch 80/300
4500/4500 [==============================] - 2s 526us/step - loss: 20.0375 - val_loss: 30.6407
Training stops after epoch 80: val_loss bottoms out at 29.8957 in epoch 70, and EarlyStopping (patience=10) then halts the run. The final printed numbers are the test-set size and the error-threshold fractions:

5000     (test samples)
0.9974   (fraction with |error| < 20 degrees)
1.0      (fraction with |error| < 45 degrees)
0.9282   (fraction with |error| < 10 degrees)