Backpropagation Algorithm: A Worked NumPy Example

# plotting library
import matplotlib.pyplot as plt
# numerical computing library
import numpy as np
# visualization library built on top of matplotlib
import seaborn as sns
# colormap utilities
from matplotlib import cm
# dataset generator
from sklearn.datasets import make_moons
# train/test split helper
from sklearn.model_selection import train_test_split

# number of samples to draw
N_SAMPLES = 2000
# test-set fraction: 2000 * 0.3 == 600 test samples
TEST_SIZE = 0.3

# generate the two-moons dataset
X, y = make_moons(n_samples=N_SAMPLES, noise=0.2, random_state=100)
# split into training and test sets; random_state fixes the shuffle seed
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=42)
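# quick sanity check: 2000 samples with a 0.3 split give 1400 train / 600 test
print(X_train.shape, X_test.shape)  # -> (1400, 2) (600, 2)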


# scatter-plot helper
def make_plot(X, y, plot_name, file_name=None, XX=None, YY=None, preds=None,
			  dark=False):
	# choose a dark or light style
	if dark:
		plt.style.use('dark_background')
	else:
		sns.set_style("whitegrid")

	# figure size in inches
	plt.figure(figsize=(16, 12))

	# axis labels
	axes = plt.gca()
	axes.set(xlabel="$x_1$", ylabel="$x_2$")

	# figure title
	plt.title(plot_name, fontsize=30)

	# leave margin room on the left and right
	plt.subplots_adjust(left=0.20)
	plt.subplots_adjust(right=0.80)

	# if a prediction grid is supplied, draw the decision-boundary contours
	if XX is not None and YY is not None and preds is not None:
		plt.contourf(XX, YY, preds.reshape(XX.shape), 25, alpha=1,
					 cmap=cm.Spectral)
		plt.contour(XX, YY, preds.reshape(XX.shape), levels=[.5],
					cmap="Greys", vmin=0, vmax=.6)
	# scatter plot, colored by class label
	plt.scatter(X[:, 0], X[:, 1], c=y.ravel(), s=40, cmap=plt.cm.Spectral,
				edgecolors='none')

	# save the figure; fall back to dataset.svg when no file_name is given
	plt.savefig(file_name if file_name else 'dataset.svg')
	plt.close()

# plot the full dataset (make_plot saves and closes the figure itself,
# so a trailing plt.show() here would display nothing)
make_plot(X, y, "Classification Dataset Visualization ", dark=True)

# fully connected layer
class Layer:
	# n_input: number of input nodes
	# n_neurons: number of output nodes
	# activation: activation function name
	# weights: weight matrix
	# bias: bias vector
	def __init__(self, n_input, n_neurons, activation=None, weights=None, bias=None):
		# y = w*x + b
		# weight matrix; default: scaled random normal initialization
		self.weights = weights if weights is not None else np.random.randn(n_input, n_neurons) * np.sqrt(1 / n_neurons)
		# bias vector; "if bias" would be ambiguous for an ndarray, so compare with None
		self.bias = bias if bias is not None else np.random.rand(n_neurons) * 0.1
		# activation function name
		self.activation = activation
		# output of this layer after activation
		self.last_activation = None
		# backpropagated error term
		self.error = None
		# delta = error * activation derivative
		self.delta = None

	# forward pass: compute the layer output
	def activate(self, x):
		r = np.dot(x, self.weights) + self.bias
		self.last_activation = self._apply_activation(r)
		return self.last_activation
    
	# apply the activation function to the pre-activation r
	def _apply_activation(self, r):
		if self.activation is None:
			return r
		elif self.activation == 'relu':
			return np.maximum(r, 0)
		elif self.activation == 'tanh':
			return np.tanh(r)
		elif self.activation == 'sigmoid':
			return 1 / (1 + np.exp(-r))
		return r
    
	# derivative of the activation function; r here is the layer *output*
	# (last_activation), which works because tanh'(x) = 1 - tanh(x)^2 and
	# sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
	def apply_activation_derivative(self, r):
		if self.activation is None:
			return np.ones_like(r)
		elif self.activation == 'relu':
			grad = np.array(r, copy=True)
			grad[r > 0] = 1.
			grad[r <= 0] = 0.
			return grad
		elif self.activation == 'tanh':
			return 1 - r**2
		elif self.activation == 'sigmoid':
			return r * (1 - r)
		return r
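
# a minimal usage sketch of Layer (names below are illustrative only): a layer
# with 2 inputs and 3 neurons maps a 2-d sample to a 3-d activation vector
demo_layer = Layer(2, 3, 'sigmoid')
print(demo_layer.activate(np.array([0.5, -1.2])).shape)  # -> (3,)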

# neural network: an ordered stack of layers
class NeuralNetwork:
	def __init__(self):
		self._layers = []

	# append a layer to the network
	def add_layer(self, layer):
		self._layers.append(layer)

	# forward pass through all layers in order
	def feed_forward(self, X):
		for layer in self._layers:
			X = layer.activate(X)
		return X
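	# e.g. for the 2-25-50-25-2 network built below, an input of shape (n, 2)
	# flows through shapes (n, 25), (n, 50), (n, 25) and comes out as (n, 2)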

	# backpropagation update for a single sample
	def backpropagation(self, X, y, learning_rate):
		# forward pass to get the network output
		output = self.feed_forward(X)
		# walk the layers in reverse
		for i in reversed(range(len(self._layers))):
			# starting from the last layer
			layer = self._layers[i]
			# compute error and delta for each layer in turn
			if layer == self._layers[-1]:
				layer.error = y - output
				layer.delta = layer.error * layer.apply_activation_derivative(output)
			else:
				next_layer = self._layers[i + 1]
				layer.error = np.dot(next_layer.weights, next_layer.delta)
				layer.delta = layer.error * layer.apply_activation_derivative(layer.last_activation)
        
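		# derivation sketch (chain rule for MSE loss L = 0.5 * ||y - o||^2):
		#   output layer:  delta = (y - o) * f'(o)
		#   hidden layers: delta = (W_next . delta_next) * f'(o)
		# which is exactly what the loop above computes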
		# gradient-descent update for every layer
		for i in range(len(self._layers)):
			layer = self._layers[i]
			# input seen by this layer: the raw sample for the first layer,
			# otherwise the previous layer's output
			o_i = np.atleast_2d(X if i == 0 else self._layers[i - 1].last_activation)
			layer.weights += layer.delta * o_i.T * learning_rate
			# the bias gradient is just delta, so it gets the same step
			layer.bias += layer.delta * learning_rate
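	# note on the sign: error is defined as (y - output), so delta already
	# points opposite the loss gradient and the update uses "+=" rather than
	# "-=" while still performing plain gradient descent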

	# training loop over epochs
	def train(self, X_train, X_test, y_train, y_test, learning_rate, max_epochs):
		# one-hot encode the training labels (two classes)
		y_onehot = np.zeros((y_train.shape[0], 2))
		y_onehot[np.arange(y_train.shape[0]), y_train] = 1
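		# e.g. labels [0, 1, 1] become rows [[1, 0], [0, 1], [0, 1]]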
		mses = []
		accuracy = []
		# epoch loop
		for i in range(max_epochs):
			# one backpropagation step per training sample
			for j in range(len(X_train)):
				self.backpropagation(X_train[j], y_onehot[j], learning_rate)
			# every ten epochs, record loss and accuracy for plotting
			if i % 10 == 0:
				mse = np.mean(np.square(y_onehot - self.feed_forward(X_train)))
				mses.append(mse)
				print('Epoch: #%s, MSE: %f' % (i, float(mse)))
				# accuracy as a percentage (multiply by 100 exactly once)
				acc = self.accuracy(self.predict(X_test), y_test.flatten()) * 100
				print('Accuracy: %.2f%%' % acc)
				accuracy.append(acc)
		return mses, accuracy

	# classification accuracy: fraction of argmax predictions matching the labels
	def accuracy(self, y_output, y_test):
		return np.mean((np.argmax(y_output, axis=1) == y_test))

	# predict = a plain forward pass
	def predict(self, X_test):
		return self.feed_forward(X_test)
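	# e.g. np.argmax(nn.predict(X_test), axis=1) turns the two output scores
	# into a hard 0/1 class label per sample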

# build the network: four layers, from 2 input features down to 2 class scores
nn = NeuralNetwork()
nn.add_layer(Layer(2, 25, 'sigmoid'))
nn.add_layer(Layer(25, 50, 'sigmoid'))
nn.add_layer(Layer(50, 25, 'sigmoid'))
nn.add_layer(Layer(25, 2, 'sigmoid'))

# train for 1000 epochs and collect the loss/accuracy history
mses, accuracy = nn.train(X_train, X_test, y_train, y_test, 0.01, 1000)
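
# make_plot accepts XX/YY/preds but the script never exercises them. A minimal
# sketch (grid ranges picked by eye for this dataset) that renders the learned
# decision boundary of the trained network:
xx, yy = np.meshgrid(np.linspace(-2, 3, 200), np.linspace(-1.5, 2, 200))
grid_preds = nn.predict(np.c_[xx.ravel(), yy.ravel()])[:, 1]
make_plot(X, y, "Decision Boundary", file_name='boundary.svg',
		  XX=xx, YY=yy, preds=grid_preds)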

# plot the loss and accuracy curves; the history holds one sample every ten
# epochs, so the x axis is rescaled accordingly
plt.figure()
plt.plot(np.arange(len(mses)) * 10, mses, 'b', label='MSE Loss')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend()
plt.savefig('exam5.2 MSE Loss.png')
plt.show()

plt.figure()
plt.plot(np.arange(len(accuracy)) * 10, accuracy, 'r', label='Accuracy rate')
plt.xlabel('Epoch')
plt.ylabel('Accuracy (%)')
plt.legend()
plt.savefig('exam5.2 Accuracy.png')
plt.show()

[Figure: dataset scatter plot]

[Figure: accuracy curve]

[Figure: MSE loss curve]

The exact figures may differ from run to run, but the overall trends should be the same...
