python神经网络回归自然_吴裕雄--天生自然 python数据分析:基于Keras使用CNN神经网络处理手写数据集...

d189f1f86bf7d0b2af633ba9cd3ae748.png

# Imports: data handling, plotting, and the Keras CNN building blocks.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import itertools

# %matplotlib inline  # Jupyter magic; uncomment when running in a notebook

# Fix NumPy's RNG so runs are reproducible.
np.random.seed(2)

from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix

from keras.utils.np_utils import to_categorical  # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau

sns.set(style='white', context='notebook', palette='deep')

# Load the data (Kaggle MNIST "digit recognizer" CSVs: one row per image,
# 784 pixel columns; train.csv also has a leading "label" column).
train = pd.read_csv("F:\\kaggleDataSet\\MNSI\\train.csv")
test = pd.read_csv("F:\\kaggleDataSet\\MNSI\\test.csv")

Y_train = train["label"]
# Drop 'label' column so X_train holds only the 784 pixel features.
X_train = train.drop(labels=["label"], axis=1)
# free some space
del train

# Class balance check: the 10 digit classes should be roughly uniform.
g = sns.countplot(Y_train)
Y_train.value_counts()

1ae6c03ec6a516de5045aeb2a180730d.png

# Check the data: confirm there are no missing pixel values in either set
# (describe() over the per-column any-null flags should show only False).
X_train.isnull().any().describe()
test.isnull().any().describe()

# Normalize the data: grayscale pixels are 0-255, scale them to [0, 1].
X_train = X_train / 255.0
test = test / 255.0

# Reshape image in 3 dimensions (height = 28px, width = 28px, canal = 1)
# as expected by Conv2D with a channels-last input_shape.
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)

# Encode labels to one hot vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0])
Y_train = to_categorical(Y_train, num_classes=10)

# Set the random seed so the train/validation split is reproducible.
random_seed = 2

# Split the train and the validation set for the fitting (10% held out).
X_train, X_val, Y_train, Y_val = train_test_split(
    X_train, Y_train, test_size=0.1, random_state=random_seed)

# Some examples: show the first training image (channel 0).
g = plt.imshow(X_train[0][:, :, 0])

6a00ee8d7d817dfab866bb18123a455d.png

# Set the CNN model.
# Architecture: In -> [[Conv2D->relu]*2 -> MaxPool2D -> Dropout]*2
#               -> Flatten -> Dense -> Dropout -> Out
# Two conv stages (32 then 64 filters) with dropout for regularization,
# followed by a 256-unit dense layer and a 10-way softmax classifier.
model = Sequential()

# Stage 1: two 5x5 conv layers, 32 filters each, then 2x2 max-pool + dropout.
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='Same',
                 activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='Same',
                 activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Stage 2: two 3x3 conv layers, 64 filters each, then strided pool + dropout.
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same',
                 activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same',
                 activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))

# Classifier head.
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))

# Define the optimizer (RMSprop with the old-style `lr` kwarg, matching the
# legacy keras.optimizers API used by this script's imports).
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

# Compile the model: one-hot targets -> categorical cross-entropy loss.
model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=["accuracy"])

# Set a learning rate annealer: halve the LR whenever validation accuracy
# stalls for 3 epochs, down to a floor of 1e-5.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)

epochs = 1  # Turn epochs to 30 to get 0.9967 accuracy
batch_size = 86

# Without data augmentation i obtained an accuracy of 0.98114
history = model.fit(X_train, Y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(X_val, Y_val),
                    verbose=2)

f8268bc3c2c1cd46c76b42b8ede7c4a3.png

# With data augmentation to prevent overfitting (accuracy 0.99286):
# small random rotations, zooms and shifts only. Flips are disabled because
# digits are not flip-invariant (e.g. a flipped 2 is not a digit).
datagen = ImageDataGenerator(
    featurewise_center=False,             # set input mean to 0 over the dataset
    samplewise_center=False,              # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,   # divide each input by its std
    zca_whitening=False,                  # apply ZCA whitening
    rotation_range=10,                    # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range=0.1,                       # Randomly zoom image
    width_shift_range=0.1,                # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,               # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,                # randomly flip images
    vertical_flip=False)                  # randomly flip images

datagen.fit(X_train)

# Fit the model on augmented batches, with the LR annealer attached.
history = model.fit_generator(
    datagen.flow(X_train, Y_train, batch_size=batch_size),
    epochs=epochs,
    validation_data=(X_val, Y_val),
    verbose=2,
    steps_per_epoch=X_train.shape[0] // batch_size,
    callbacks=[learning_rate_reduction])

b8f75bcc6f3fc0a3333539918654e6ca.png

# Plot the loss and accuracy curves for training and validation.
# NOTE(review): the spurious `axes=ax[0]` kwarg from the original was removed —
# `axes` is not a valid Line2D property on current matplotlib and was redundant
# (the call is already made on ax[0]).
# NOTE(review): history keys 'acc'/'val_acc' are the legacy Keras names; newer
# versions use 'accuracy'/'val_accuracy' — confirm against the Keras version.
fig, ax = plt.subplots(2, 1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="validation loss")
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_acc'], color='r', label="Validation accuracy")
legend = ax[1].legend(loc='best', shadow=True)

fc9da7ff28f5ef64e126cf047743dd52.png

# Look at confusion matrix


def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """This function prints and plots the confusion matrix.

    Normalization can be applied by setting `normalize=True`.

    cm      -- square confusion-matrix array (true labels as rows).
    classes -- iterable of class labels used for the axis ticks.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        # Row-normalize so each row sums to 1 (per-true-class rates).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Use half the max count as the text-color threshold for readability.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


# Predict the values from the validation dataset
Y_pred = model.predict(X_val)
# Convert prediction probabilities to class indices
Y_pred_classes = np.argmax(Y_pred, axis=1)
# Convert one-hot validation labels back to class indices
Y_true = np.argmax(Y_val, axis=1)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes=range(10))

54d04b74df220c1e80b08d501857b492.png

# Display some error results
# Errors are difference between predicted labels and true labels
errors = (Y_pred_classes - Y_true != 0)

Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]


def display_errors(errors_index, img_errors, pred_errors, obs_errors):
    """This function shows 6 images with their predicted and real labels.

    errors_index -- indices (into the error arrays) of the images to show;
                    must contain at least nrows*ncols = 6 entries.
    img_errors   -- misclassified images, each reshapeable to 28x28.
    pred_errors  -- predicted labels for those images.
    obs_errors   -- true labels for those images.
    """
    n = 0
    nrows = 2
    ncols = 3
    fig, ax = plt.subplots(nrows, ncols, sharex=True, sharey=True)
    for row in range(nrows):
        for col in range(ncols):
            error = errors_index[n]
            ax[row, col].imshow((img_errors[error]).reshape((28, 28)))
            ax[row, col].set_title("Predicted label :{}\nTrue label :{}".format(
                pred_errors[error], obs_errors[error]))
            n += 1

# Probabilities of the wrong predicted numbers
Y_pred_errors_prob = np.max(Y_pred_errors, axis=1)
# Predicted probabilities of the true values in the error set
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Difference between the probability of the predicted label and the true label
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Sorted list of the delta prob errors (ascending, so the worst come last)
sorted_delta_errors = np.argsort(delta_pred_true_errors)
# Top 6 errors: the most confidently-wrong predictions
most_important_errors = sorted_delta_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)

1fedde4e4c43f211c5efb7747ac0a3b0.png

# predict results on the test set
results = model.predict(test)
# select the index with the maximum probability as the predicted digit
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")

# Kaggle submission format: ImageId (1-based, 28000 test rows) + Label.
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results],
                       axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
基于C++&OPENCV 的全景图像拼接 C++是一种广泛使用的编程语言,它是由Bjarne Stroustrup于1979年在新泽西州美利山贝尔实验室开始设计开发的。C++是C语言的扩展,旨在提供更强大的编程能力,包括面向对象编程和泛型编程的支持。C++支持数据封装、继承和多态等面向对象编程的特性和泛型编程的模板,以及丰富的标准库,提供了大量的数据结构和算法,极大地提高了开发效率。12 C++是一种静态类型的、编译式的、通用的、大小写敏感的编程语言,它综合了高级语言和低级语言的特点。C++的语法与C语言非常相似,但增加了许多面向对象编程的特性,如类、对象、封装、继承和多态等。这使得C++既保持了C语言的低级特性,如直接访问硬件的能力,又提供了高级语言的特性,如数据封装和代码重用。13 C++的应用领域非常广泛,包括但不限于教育、系统开发、游戏开发、嵌入式系统、工业和商业应用、科研和高性能计算等领域。在教育领域,C++因其结构化和面向对象的特性,常被选为计算机科学和工程专业的入门编程语言。在系统开发领域,C++因其高效性和灵活性,经常被作为开发语言。游戏开发领域中,C++由于其高效性和广泛应用,在开发高性能游戏和游戏引擎中扮演着重要角色。在嵌入式系统领域,C++的高效和灵活性使其成为理想选择。此外,C++还广泛应用于桌面应用、Web浏览器、操作系统、编译器、媒体应用程序、数据库引擎、医疗工程和机器人等领域。16 学习C++的关键是理解其核心概念和编程风格,而不是过于深入技术细节。C++支持多种编程风格,每种风格都能有效地保证运行时间效率和空间效率。因此,无论是初学者还是经验丰富的程序员,都可以通过C++来设计和实现新系统或维护旧系统。3

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值