TensorFlow linear regression: a Python introduction

Contents

Importing the libraries

Constructing the data

Creating training and test data

Building the neural network from the first layer to the last

Testing

Plotting the test results

Regressing the sin function

Training on another dataset (Boston housing)


Importing the libraries

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time

Constructing the data

X = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
Y = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])

Creating training and test data

import sklearn.model_selection as sk
X_train, X_test, Y_train, Y_test = sk.train_test_split(X, Y, test_size=0.2, random_state=42)
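With only ten points and test_size=0.2, the split leaves 8 training samples and 2 test samples; a quick check of the shapes, using nothing beyond the variables created above:

print(X_train.shape, X_test.shape)  # expected: (8,) (2,)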

Building the neural network from the first layer to the last

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units=1, activation=tf.nn.relu, input_dim=1))
model.summary()
# start assembling the model
# mse = mean squared error
# sgd = stochastic gradient descent
model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])

model.fit(X_train, Y_train,
          epochs=4000,
          batch_size=len(Y_train))

Testing

print("start testing")

cost = model.evaluate(X_test, Y_test)

print("test cost: {}".format(cost))

weights, biases = model.layers[0].get_weights()

print("Weights = {}, bias = {}".format(weights,biases))

Plotting the test results

# plot the test results
Y_pred = model.predict(X_test)
# plot the data
plt.scatter(X, Y, label='X,Y')
plt.scatter(X_test, Y_test, label='X_test, Y_test')
plt.scatter(X_test, Y_pred, label='pred')
# plot the fitted line
x2 = np.linspace(0, 1, 100)
print(biases[0])
print(weights[0])

y2 = weights[0]*x2 + biases[0]
plt.plot(x2, y2, '-r', label='weights')
plt.legend()
plt.show()

Regressing the sin function


Before the full script, the data construction step by step:

np.random.seed(int(time.time()))  seeds the random number generator
num = 100  sets the number of samples
X = np.linspace(-4, 4, num)  generates evenly spaced points
np.random.shuffle(X)  shuffles them
Y = 0.1*np.sin(X)  computes the target values

#!/usr/bin/env python
# -*- coding=utf-8 -*-
import tensorflow as tf
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
import time

# generate the data (100 samples)
np.random.seed(int(time.time()))
num = 100
X = np.linspace(-4, 4, num)
np.random.shuffle(X)
Y = 0.1*np.sin(X)

# build the training and testing data

x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)

# build the neural network from the first layer to the last

model = tf.keras.models.Sequential([
   tf.keras.layers.Dense(units=100, activation=tf.nn.tanh, input_dim=1),
   tf.keras.layers.Dense(units=100, activation=tf.nn.tanh),
   tf.keras.layers.Dense(units=1, activation=tf.nn.tanh),
])

# Only the first layer needs an explicit input dimension; every later layer takes the previous layer's output as its input.
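model.summary() makes this explicit; the parameter counts follow directly from the layer sizes (1→100, 100→100, 100→1 plus biases), so this is just a sanity check of the architecture defined above:

model.summary()
# Dense (1 -> 100):   1*100 + 100 = 200 parameters
# Dense (100 -> 100): 100*100 + 100 = 10,100 parameters
# Dense (100 -> 1):   100*1 + 1 = 101 parameters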

# start assembling the model
# mse = mean squared error
# sgd = stochastic gradient descent
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
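train_on_batch is used in the loop below so the scatter plot can be redrawn every few steps; if the live plot is not needed, the same training can be done with a single model.fit call instead (a sketch under that assumption, not part of the original script):

# alternative: let Keras run the training loop internally, full batch each epoch
history = model.fit(x_train, y_train, epochs=20000, batch_size=len(x_train), verbose=0)
print("final training loss: {}".format(history.history['loss'][-1]))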


# training
print("start training")
for step in range(20000):
    cost = model.train_on_batch(x_train, y_train)
    if step % 20 == 0:
        W, b = model.layers[0].get_weights()
        print("step {} Weights = {}, bias = {}, train cost {}".format(step, W, b, cost))
        plt.cla()
        # plot the data
        plt.scatter(X, Y)
        y_pred2 = model.predict(X)  # predicted Y
        plt.scatter(X, y_pred2, color='blue')
        plt.text(0, -0.05, 'step: %d, cost=%.2f'
                 % (step, cost[0]), fontdict={'size': 10, 'color': 'red'})
        plt.pause(0.01)
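The split above produces x_test and y_test, but the loop never looks at them; a quick held-out check after training (a minimal sketch, assuming the trained model from the loop above):

test_cost = model.evaluate(x_test, y_test)
print("test cost: {}".format(test_cost))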




Training on another dataset (Boston housing)

#!/usr/bin/env python
# -*- coding=utf-8 -*-
import tensorflow as tf
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
import time
import pandas as pd
from tensorflow.keras.datasets import boston_housing

(x_train, y_train), (x_test, y_test) = boston_housing.load_data()

print(x_train.shape)
print(y_train.shape)

classes = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
data = pd.DataFrame(x_train, columns=classes)
print(data.head())

data['MEDV'] = pd.Series(data=y_train)
print(data.head())
print(data.describe())  # get some basic stats on the dataset

import seaborn as sns
from sklearn import preprocessing

scaler = preprocessing.MinMaxScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)  # reuse the scaler fitted on the training data
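seaborn is imported above but not used anywhere else; one common use at this point is a correlation heatmap of the features against the target, a sketch built on the `data` DataFrame constructed earlier:

# visualise pairwise feature/target correlations
plt.figure(figsize=(10, 8))
sns.heatmap(data.corr(), cmap='coolwarm')
plt.title('Boston housing feature correlations')
plt.show()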

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(320, activation='relu', input_shape=[x_train.shape[1]]))
model.add(tf.keras.layers.Dense(640, activation='relu'))
model.add(tf.keras.layers.Dense(640, activation='relu'))
model.add(tf.keras.layers.Dense(1))

try:
    with open('model2.h5', 'r') as load_weights:
        # load the previously saved model weights
        model.load_weights("model2.h5")
except IOError:
    print("model2.h5 does not exist, training from scratch")

learning_rate = 0.0001
opt1 = tf.keras.optimizers.Nadam(learning_rate=learning_rate)
model.compile(loss='mse', optimizer=opt1, metrics=['mae'])

history1 = []
for step in range(40000):
    cost = model.train_on_batch(x_train, y_train)
    if step % 20 == 0:
        print("step {}   train cost {}".format(step, cost))
        # save the model architecture
        with open("model2.json", "w") as json_file:
            json_file.write(model.to_json())
        # save the model weights
        model.save_weights("model2.h5")
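The loop saves both the architecture (model2.json) and the weights (model2.h5); reloading them later would look roughly like this (a sketch, not part of the training script):

# rebuild the model from the saved architecture and weights
with open("model2.json") as json_file:
    restored = tf.keras.models.model_from_json(json_file.read())
restored.load_weights("model2.h5")
restored.compile(loss='mse', optimizer='nadam', metrics=['mae'])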

# testing
print("start testing")
cost = model.evaluate(x_test, y_test)
print("test cost: {}".format(cost))

Y_pred2 = model.predict(x_test)  # predicted Y
print(Y_pred2[:10])
print(y_test[:10])
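A scatter of predicted against actual prices gives a quicker read than the raw printouts; a small sketch using the variables above:

# predicted vs. actual MEDV; points near the diagonal are good predictions
plt.scatter(y_test, Y_pred2.flatten())
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--')
plt.xlabel('actual price')
plt.ylabel('predicted price')
plt.show()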


 
