Deep Learning: Keras + Boston_housing

Predicting house prices: a regression example on the Boston Housing dataset bundled with Keras.

# -*- coding: utf-8 -*-

"""
@Date: 2018/10/7

@Author: dreamhomes

@Summary: Predicting house prices on the Boston Housing dataset (a regression example).
"""

from keras.datasets import boston_housing
from keras import models
from keras import layers

import numpy as np
import matplotlib.pyplot as plt

(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
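# train_data has shape (404, 13) and test_data (102, 13); the targets are
# median home prices in thousands of dollars.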

# print(train_data[0])
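# Feature-wise normalization: center and scale each feature using statistics
# computed on the training set only, and reuse the same mean/std for the test
# set so no information from the test data leaks into preprocessing.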
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
train_data -= mean
train_data /= std

test_data -= mean
test_data /= std


def build_model():
    """
    Build and compile the network. Wrapped in a function so the same
    architecture can be instantiated from scratch for every cross-validation fold.
    :return: a compiled Keras model
    """
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(13,)))
    model.add(layers.Dense(64, activation='relu'))
    # Single linear output unit: the model predicts one continuous value.
    model.add(layers.Dense(1))
    # MSE loss for regression; MAE is reported because it is easy to interpret.
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model


# # K-fold cross-validation
#
# k = 4
# num_val_samples = len(train_data) // k
# num_epochs = 500
# all_scores = []
#
# # Saving the validation logs at each fold
# all_mae_histories = []
#
# for i in range(k):
#     print('processing fold #', i)
#     val_data = train_data[i * num_val_samples:(i + 1) * num_val_samples]
#     val_targets = train_targets[i * num_val_samples:(i + 1) * num_val_samples]
#
#     partial_train_data = np.concatenate(
#         [train_data[:i * num_val_samples], train_data[(i + 1) * num_val_samples:]], axis=0)
#     partial_train_targets = np.concatenate(
#         [train_targets[:i * num_val_samples], train_targets[(i + 1) * num_val_samples:]], axis=0)
#
#     model = build_model()
#     history = model.fit(
#         partial_train_data,
#         partial_train_targets,
#         validation_data=(val_data, val_targets),
#         epochs=num_epochs,
#         batch_size=1,
#         verbose=0)
#     # val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
#     # all_scores.append(val_mae)
#     # print(all_scores)
#     # Note: newer tf.keras versions record this metric under the key 'val_mae'.
#     mae_history = history.history['val_mean_absolute_error']
#     all_mae_histories.append(mae_history)
#
# average_mae_history = [
#     np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
#
# plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
# plt.xlabel('Epochs')
# plt.ylabel('Validation MAE')
# plt.show()
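
# The per-epoch validation MAE from the K-fold run above is usually noisy, which
# makes it hard to see where overfitting begins. One common reading aid is to
# smooth the curve with an exponential moving average before plotting. This is
# only a sketch: smooth_curve is a helper defined here for illustration, and it
# assumes average_mae_history from the (commented-out) block above.
#
# def smooth_curve(points, factor=0.9):
#     """Blend each point with the previous smoothed value (exponential moving average)."""
#     smoothed = []
#     for point in points:
#         if smoothed:
#             smoothed.append(smoothed[-1] * factor + point * (1 - factor))
#         else:
#             smoothed.append(point)
#     return smoothed
#
# smoothed_mae = smooth_curve(average_mae_history[10:])  # drop the first, very noisy epochs
# plt.plot(range(1, len(smoothed_mae) + 1), smoothed_mae)
# plt.xlabel('Epochs')
# plt.ylabel('Smoothed validation MAE')
# plt.show()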

# Training the final model on all the training data
# (the number of epochs can be chosen from the K-fold validation curves above).
model = build_model()
model.fit(train_data, train_targets, epochs=80, batch_size=16, verbose=1)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)

print('Test MAE:', test_mae_score)
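
# With training done, predictions for new (already normalized) samples come from
# model.predict. A minimal check against the held-out test set:
predictions = model.predict(test_data)
print('Predicted price for the first test sample (in thousands of dollars):', predictions[0][0])
print('Actual price:', test_targets[0])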
