Python与神经网络1.6:训练我们的神经网络

参考网站:https://www.python-course.eu/dividing_lines_between_classes.php
为何需要训练我们的神经网络?上一节我们创建了一个神经网络,我们是那样幸运,规定的权重系数完美地完成了将我们的两类点分开的任务,但是这样的好事可能只会出现在我们的文章里,绝大多数时候,我们是无法得知合适的权重系数的,这就需要用一批已知分类的数据训练我们的系统,使我们的系统更加完善。

import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from collections import Counter
from sklearn.model_selection import train_test_split
import random


def points_within_circle(radius, center=(0, 0), number_of_points=100):
    """Sample points uniformly at random from the interior of a circle.

    Parameters
    ----------
    radius : float
        Radius of the circle.
    center : tuple of float, optional
        (x, y) coordinates of the circle centre.
    number_of_points : int, optional
        Number of points to draw.

    Returns
    -------
    (x, y) : pair of ndarray
        Coordinates of the sampled points.
    """
    cx, cy = center
    # Taking the square root of a uniform variate makes the points uniform
    # over the circle's *area* rather than clustering near the centre.
    radii = radius * np.sqrt(np.random.random((number_of_points,)))
    angles = np.random.random((number_of_points,)) * 2 * np.pi
    return cx + radii * np.cos(angles), cy + radii * np.sin(angles)

# --- Build a labelled two-class data set from two overlapping circles ---
class0_x, class0_y = points_within_circle(2, (6, 2), 100)
class1_x, class1_y = points_within_circle(2, (2, 6), 100)

class0_points = list(zip(class0_x, class0_y))
class1_points = list(zip(class1_x, class1_y))

# Pair every point with its class label: 0 for the first circle, 1 for the second.
samples = class0_points + class1_points
targets = [0] * len(class0_points) + [1] * len(class1_points)
labelled_data = list(zip(samples, targets))
random.shuffle(labelled_data)
data, labels = zip(*labelled_data)

# Hold out 20% of the shuffled data for testing; fixed seed for reproducibility.
train_data, test_data, train_labels, test_labels = train_test_split(
    data, labels,
    train_size=0.8,
    test_size=0.2,
    random_state=42)
print(train_data[:10], train_labels[:10])

class Perceptron:
    """A single perceptron with a hard-threshold (Heaviside) activation."""

    def __init__(self,
                 weights,
                 learning_rate=0.1):
        """
        weights : sequence of float -- initial weight vector
        learning_rate : float -- step size used by `adjust`
        """
        # Force a float dtype so the in-place update in `adjust` cannot
        # fail with a casting error when integer weights are supplied.
        self.weights = np.array(weights, dtype=float)
        self.learning_rate = learning_rate

    @staticmethod
    def unit_step_function(x):
        """Heaviside step: 0 for negative input, 1 otherwise."""
        # @staticmethod added so the function also works when accessed
        # through an instance, not only via the class.
        if x < 0:
            return 0
        else:
            return 1

    def __call__(self, in_data):
        """Classify `in_data`: weighted sum followed by the step function."""
        weighted_sum = (self.weights * in_data).sum()
        return Perceptron.unit_step_function(weighted_sum)

    def adjust(self,
               target_result,
               calculated_result,
               in_data):
        """Apply one perceptron-learning-rule update for a single sample.

        Weights change only when the prediction was wrong:
        w += (target - prediction) * x * learning_rate
        """
        in_data = np.asarray(in_data)
        error = target_result - calculated_result
        if error != 0:
            self.weights += error * in_data * self.learning_rate

    def evaluate(self, data, labels):
        """Count correct/wrong predictions on (data, labels).

        Returns a Counter with keys "correct" and "wrong".
        """
        evaluation = Counter()
        for sample, label in zip(data, labels):
            # BUG FIX: the original classified with the module-level global
            # `p` instead of `self`, so evaluate() used whatever perceptron
            # that name happened to point at -- it only worked by accident.
            if self(sample) == label:
                evaluation["correct"] += 1
            else:
                evaluation["wrong"] += 1
        return evaluation

# Train the perceptron with a single pass over the training data,
# then report the hit/miss counts on both splits.
p = Perceptron(weights=[0.1, 0.1],
               learning_rate=0.3)

for sample, target in zip(train_data, train_labels):
    p.adjust(target, p(sample), sample)

evaluation = p.evaluate(train_data, train_labels)
print(evaluation.most_common())
evaluation = p.evaluate(test_data, test_labels)
print(evaluation.most_common())
print(p.weights)

# Visualise the training data together with the learned decision boundary.
X = np.arange(0, 7)
fig, ax = plt.subplots()

lemons = [pt for pt, lbl in zip(train_data, train_labels) if lbl == 1]
oranges = [pt for pt, lbl in zip(train_data, train_labels) if lbl == 0]
lemons_x, lemons_y = zip(*lemons)
oranges_x, oranges_y = zip(*oranges)

ax.scatter(oranges_x, oranges_y, c="orange")
ax.scatter(lemons_x, lemons_y, c="y")

# The boundary of w1*x + w2*y = 0 is the line y = -(w1/w2) * x.
w1, w2 = p.weights
m = -w1 / w2
ax.plot(X, m * X, label="decision boundary")
ax.legend()
print(p.weights)
print(m)


# Re-train from scratch and draw every intermediate decision boundary so
# the search for a separating line can be followed visually.
p = Perceptron(weights=[0.1, 0.1],
               learning_rate=0.3)
number_of_colors = 7
colors = cm.rainbow(np.linspace(0, 1, number_of_colors))

fig1, ax1 = plt.subplots()
ax1.set_xticks(range(8))
ax1.set_ylim([-2, 8])

counter = 0
for index, sample in enumerate(train_data[0:160]):
    old_weights = p.weights.copy()
    p.adjust(train_labels[index], p(sample), sample)
    if np.array_equal(old_weights, p.weights):
        continue  # prediction was already correct; nothing new to draw
    # Mark the sample that triggered this weight update.
    color = "orange" if train_labels[index] == 0 else "y"
    ax1.scatter(sample[0], sample[1], color=color)
    ax1.annotate(str(counter), (sample[0], sample[1]))
    m = -p.weights[0] / p.weights[1]
    print(index, m, p.weights, sample)
    # The first seven boundaries each get their own rainbow colour;
    # any later ones fall back to black.
    line_color = colors[counter] if counter < number_of_colors else "black"
    ax1.plot(X, m * X, label=str(counter), color=line_color)
    counter += 1
    print(counter)
ax1.legend()
plt.show()

通过上述代码,我们用部分数据训练了我们的系统,寻找到较优的权重值;同时,准备了部分测试数据,测试我们的分类系统的准确性。事实证明,我们的系统很准确。
在这里插入图片描述
同时我们可视化了我们的搜寻过程
在这里插入图片描述

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值