Implementing Simple Linear Regression and a Neural Network with TensorFlow

1. Workflow

The overall workflow breaks down into three steps (a minimal sketch follows the list):

1. Define the structure of the computation graph

2. Define the loss function and choose an optimization algorithm

3. Create a session, then train and predict
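
For orientation, here is a minimal sketch of the three steps using the TensorFlow 1.x API (the same API used throughout this post). The single data point and the constants are made up purely for illustration:

import tensorflow as tf

# Step 1: define the graph structure
x = tf.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.zeros([1, 1]))
yhat = tf.matmul(x, w)

# Step 2: define the loss and choose an optimizer
y = tf.placeholder(tf.float32, shape=[None, 1])
loss = tf.reduce_mean(tf.square(y - yhat))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Step 3: create a session, train, then predict
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op, feed_dict={x: [[1.0]], y: [[2.0]]})
    print(sess.run(yhat, feed_dict={x: [[1.0]]}))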

2. Linear Regression

import tensorflow as tf
import numpy as np


class LinearRegression:
    def __init__(self, dim, lr=0.01):
        self.dim = dim
        self.lr = lr
        # Placeholders for a batch of inputs (dim features) and targets
        self.X = tf.placeholder(dtype=tf.float32, shape=[None, dim], name='input_X')
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='input_y')
        self.yhat = self._build_graph(self.X)
        # Mean squared error, minimized by plain gradient descent
        self.loss = tf.reduce_mean(tf.square(self.y - self.yhat))
        self.train_op = tf.train.GradientDescentOptimizer(self.lr).minimize(self.loss)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def _build_graph(self, X):
        # Linear model: y_hat = X @ W + b; b has shape [1, 1] and broadcasts over the batch
        Weights = tf.Variable(tf.random_normal([self.dim, 1]))
        bias = tf.Variable(tf.random_normal([1, 1]))
        output = tf.matmul(X, Weights) + bias
        return output

    def fit(self, X, y, epoch):
        for i in range(epoch):
            # One full-batch gradient step; fetch the (pre-update) loss in the same run call
            _, loss = self.sess.run([self.train_op, self.loss],
                                    feed_dict={self.X: X, self.y: y})
            print('Epoch', i, ', loss:', loss)

    def predict(self, X):
        return self.sess.run(self.yhat, feed_dict={self.X: X})


def run():
    x_data = 5 - 10 * np.random.random([200, 3])  # 200 samples, 3 features in (-5, 5]
    noise = np.random.normal(0, 0.02, x_data.shape[0])
    y = 2 * x_data[:, 0] + 5 * x_data[:, 1] - 3 * x_data[:, 2] + 1 + noise  # y = 2*x1 + 5*x2 - 3*x3 + 1
    y = y.reshape(-1, 1)  # column vector, matching the [None, 1] placeholder
    x_train, y_train = x_data[:150], y[:150]  # first 150 samples for training
    x_test, y_test = x_data[150:], y[150:]    # remaining 50 for testing

    model = LinearRegression(x_data.shape[1])
    model.fit(x_train, y_train, epoch=128)
    y_pred = model.predict(x_test)
    loss = np.mean(np.square(y_pred - y_test))
    print('Test loss:', loss)


if __name__ == '__main__':
    run()
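
Because the data was generated from known coefficients, the fit can be sanity-checked: the learned weights should come out near [2, 5, -3] with a bias near 1. One way to look at them is the sketch below, a small addition (not in the original script) that assumes it is appended at the end of run() in the same process, so tf.trainable_variables() still refers to the two variables created in _build_graph:

    # Sanity check (hypothetical addition at the end of run()): the trained
    # parameters should approximate the generating weights [2, 5, -3] and bias 1
    for var in tf.trainable_variables():
        print(var.name, model.sess.run(var))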

3. Neural Network

import tensorflow as tf
import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split


class SimpleNN:
    def __init__(self, in_dim, lr=0.01):
        self.in_dim = in_dim
        self.lr = lr
        self.X = tf.placeholder(dtype=tf.float32, shape=[None, self.in_dim], name='input_x')
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='input_y')
        self.yhat = self._build_graph(self.X)
        # Same MSE loss as before, but minimized with Adam instead of plain SGD
        self.loss = tf.reduce_mean(tf.square(self.y - self.yhat))
        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def _build_graph(self, X):  # in_dim -> 256 -> 128 -> 64 -> 1
        l1 = self._add_layers(X, self.in_dim, 256, tf.nn.tanh)
        l2 = self._add_layers(l1, 256, 128, tf.nn.tanh)
        l3 = self._add_layers(l2, 128, 64, tf.nn.tanh)
        output = self._add_layers(l3, 64, 1)
        return output

    def _add_layers(self, inputs, in_size, out_size, activation_function=None):
        # One fully connected layer: out = f(inputs @ W + b); f is the identity
        # when no activation_function is given (used for the linear output layer)
        Weights = tf.Variable(tf.random_normal([in_size, out_size]))
        bias = tf.Variable(tf.random_normal([1, out_size]))
        out = tf.matmul(inputs, Weights) + bias
        if activation_function is not None:
            out = activation_function(out)
        return out

    def fit(self, X, y, epoch):
        for i in range(epoch):
            # One full-batch gradient step; fetch the (pre-update) loss in the same run call
            _, loss = self.sess.run([self.train_op, self.loss],
                                    feed_dict={self.X: X, self.y: y})
            print('Epoch', i, ', loss:', loss)

    def predict(self, X):
        return self.sess.run(self.yhat, feed_dict={self.X: X})


def run():
    X, y = load_boston(return_X_y=True)  # Boston housing: 506 samples, 13 features
    y = y.reshape(-1, 1)  # column vector, matching the [None, 1] placeholder
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    model = SimpleNN(X.shape[1])
    model.fit(X_train, y_train, epoch=160)
    y_pred = model.predict(X_test)
    loss = np.mean(np.square(y_pred - y_test))
    print('Test loss:', loss)


if __name__ == '__main__':
    run()
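
A practical note: the Boston features have very different scales, and with N(0, 1) weight initialization the tanh units can saturate, making training erratic. Standardizing the inputs before fitting is a common remedy; below is a sketch with sklearn's StandardScaler, an assumed preprocessing step (not part of the original script) that would go right after train_test_split:

from sklearn.preprocessing import StandardScaler

# Assumed preprocessing: fit the scaler on the training split only,
# then apply the same transform to the test split
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)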
