# Regression with Keras using the TensorFlow backend

import numpy as np
np.random.seed(1337)
from keras.models import  Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt

# Generate toy data: y = 25x + 100 plus small Gaussian noise.
X = np.linspace(-1, 1, 50)
np.random.shuffle(X)  # shuffle so the train/test split samples the whole interval
Y = 25 * X + 100 + np.random.normal(0, 0.05, (50,))

# Visualize the raw data.
plt.scatter(X, Y)
plt.show()

# First 36 points for training, remaining 14 held out for testing.
X_train, Y_train = X[:36], Y[:36]
X_test, Y_test = X[36:], Y[36:]

# Build the network: one Dense unit with one input is plain linear regression.
model = Sequential()
# Pass `units` positionally: works on Keras 1 and Keras 2
# (the `output_dim=` keyword was removed in Keras 2).
model.add(Dense(1, input_dim=1))

# Mean-squared error + SGD is the classic linear-regression setup.
model.compile(loss='mse', optimizer='sgd')

# Train one batch at a time, reporting the loss every 10 steps.
print("Training.........................")
for step in range(501):
    cost = model.train_on_batch(X_train, Y_train)
    if step % 10 == 0:
        print('train cost', cost)

# Evaluate on the held-out points and show the learned parameters
# (expected to approach weight 25 and bias 100).
print("Testing..........................")
cost = model.evaluate(X_test, Y_test, batch_size=14)
print('test cost', cost)
W, b = model.layers[0].get_weights()
print('Weight=', W, '\nbiase', b)

# Plot predictions. X was shuffled, so sort by x before drawing the line;
# plotting in shuffled order would produce a zigzag instead of a line.
Y_pred = model.predict(X_test)
order = np.argsort(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test[order], Y_pred[order])
plt.show()