Implementing a BP Neural Network in Python to Predict Athletes' High-Jump Results

The results of the first 14 athletes are used as training samples to predict the high-jump result of the 15th athlete. The data are as follows:

| No. | High jump (m) | 30 m flying sprint (s) | Standing triple jump (m) | Run-up touch height (m) | 4-6-step run-up jump (m) | Weighted deep squat (kg) | Barbell half-squat coefficient | 100 m (s) | Snatch (kg) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 1 | 2.24 | 3.2 | 9.6 | 3.45 | 2.15 | 140 | 2.8 | 11.0 | 50 |
| 2 | 2.33 | 3.2 | 10.3 | 3.75 | 2.2 | 120 | 3.4 | 10.9 | 70 |
| 3 | 2.24 | 3.0 | 9.0 | 3.5 | 2.2 | 140 | 3.5 | 11.4 | 50 |
| 4 | 2.32 | 3.2 | 10.3 | 3.65 | 2.2 | 150 | 2.8 | 10.8 | 80 |
| 5 | 2.20 | 3.2 | 10.1 | 3.5 | 2.0 | 80 | 1.5 | 11.3 | 50 |
| 6 | 2.27 | 3.4 | 10.0 | 3.4 | 2.15 | 130 | 3.2 | 11.5 | 60 |
| 7 | 2.20 | 3.2 | 9.6 | 3.55 | 2.1 | 130 | 3.5 | 11.8 | 65 |
| 8 | 2.26 | 3.0 | 9.0 | 3.5 | 2.1 | 100 | 1.8 | 11.3 | 40 |
| 9 | 2.20 | 3.2 | 9.6 | 3.55 | 2.1 | 130 | 3.5 | 11.8 | 65 |
| 10 | 2.24 | 3.2 | 9.2 | 3.5 | 2.1 | 140 | 2.5 | 11.0 | 50 |
| 11 | 2.24 | 3.2 | 9.5 | 3.4 | 2.15 | 115 | 2.8 | 11.9 | 50 |
| 12 | 2.20 | 3.9 | 9.0 | 3.1 | 2.0 | 80 | 2.2 | 13.0 | 50 |
| 13 | 2.20 | 3.1 | 9.5 | 3.6 | 2.1 | 90 | 2.7 | 11.1 | 70 |
| 14 | 2.35 | 3.2 | 9.7 | 3.45 | 2.15 | 130 | 4.6 | 10.85 | 70 |
| 15 | (to predict) | 3.0 | 9.3 | 3.3 | 2.05 | 100 | 2.8 | 11.2 | 50 |

Columns 3-10 are the eight input features; the high-jump result in column 2 is the target, which is unknown for athlete 15. (The units in parentheses are inferred; they were missing from the source.)

The program code is as follows. It builds a small fully connected network (8 inputs, two sigmoid hidden layers of sizes `hidden_num1` and `hidden_num2`, one sigmoid output) and trains it one sample at a time, looping on each sample until its output error falls below the `error` threshold:

```python
import numpy as np
import scipy.special as sp


class net:
    def __init__(self, sample_data, output_data, hidden_num1, hidden_num2,
                 error=0.001, rate=0.8):
        sample_num, input_num = np.shape(sample_data)
        self.sample_data = sample_data    # one row of measurements per athlete
        self.output_data = output_data    # one high-jump result per row
        self.sample_num = sample_num
        self.input_num = input_num
        self.hidden_num1 = hidden_num1
        self.hidden_num2 = hidden_num2
        self.rate = rate                  # learning rate
        self.error = error                # per-sample error threshold

        # Min-max normalize the targets into [0, 1] to match the sigmoid output.
        self.max_data = self.output_data.max()
        self.min_data = self.output_data.min()
        self.output_data = (self.output_data - self.min_data) / (self.max_data - self.min_data)

        # Initialize weights and biases uniformly in [-1, 1).
        self.w1 = np.random.rand(hidden_num1, input_num) * 2 - 1
        self.w2 = np.random.rand(hidden_num2, hidden_num1) * 2 - 1
        self.w3 = np.random.rand(1, hidden_num2) * 2 - 1

        self.b1 = np.random.rand(hidden_num1, 1) * 2 - 1
        self.b2 = np.random.rand(hidden_num2, 1) * 2 - 1
        self.b3 = np.random.rand(1, 1) * 2 - 1

        self.train()

    def train(self):
        for i in range(self.sample_num):
            input_data = self.sample_data[i].reshape(self.input_num, 1)
            label = self.output_data[i].reshape(1, 1)

            # Forward pass.
            output_hidden1 = sp.expit(np.dot(self.w1, input_data) + self.b1)
            output_hidden2 = sp.expit(np.dot(self.w2, output_hidden1) + self.b2)
            final = sp.expit(np.dot(self.w3, output_hidden2) + self.b3)

            # Output-layer delta: sigmoid derivative times the residual.
            E3 = final * (1 - final) * (label - final)

            # Keep updating on this sample until its error is small enough.
            while abs(E3) >= self.error:
                # Backpropagate the delta through the hidden layers.
                E2 = output_hidden2 * (1 - output_hidden2) * np.dot(self.w3.T, E3)
                E1 = output_hidden1 * (1 - output_hidden1) * np.dot(self.w2.T, E2)

                # Delta-rule updates for weights and biases.
                self.w1 += self.rate * E1 * input_data.T
                self.w2 += self.rate * E2 * output_hidden1.T
                self.w3 += self.rate * E3 * output_hidden2.T

                self.b1 += self.rate * E1
                self.b2 += self.rate * E2
                self.b3 += self.rate * E3

                # Forward pass again with the updated parameters.
                output_hidden1 = sp.expit(np.dot(self.w1, input_data) + self.b1)
                output_hidden2 = sp.expit(np.dot(self.w2, output_hidden1) + self.b2)
                final = sp.expit(np.dot(self.w3, output_hidden2) + self.b3)
                E3 = final * (1 - final) * (label - final)

        print('finished training!')


    def predict(self, predict_data):
        """Forward pass on new data; un-normalize the output back to meters."""
        predict_data = predict_data.reshape(self.input_num, 1)
        output_hidden1 = sp.expit(np.dot(self.w1, predict_data) + self.b1)
        output_hidden2 = sp.expit(np.dot(self.w2, output_hidden1) + self.b2)
        final = sp.expit(np.dot(self.w3, output_hidden2) + self.b3)
        return final * (self.max_data - self.min_data) + self.min_data


sample_data = np.array([
    [3.2,  9.6, 3.45, 2.15, 140, 2.8, 11.0,  50],
    [3.2, 10.3, 3.75, 2.2,  120, 3.4, 10.9,  70],
    [3.0,  9.0, 3.5,  2.2,  140, 3.5, 11.4,  50],
    [3.2, 10.3, 3.65, 2.2,  150, 2.8, 10.8,  80],
    [3.2, 10.1, 3.5,  2.0,   80, 1.5, 11.3,  50],
    [3.4, 10.0, 3.4,  2.15, 130, 3.2, 11.5,  60],
    [3.2,  9.6, 3.55, 2.1,  130, 3.5, 11.8,  65],
    [3.0,  9.0, 3.5,  2.1,  100, 1.8, 11.3,  40],
    [3.2,  9.6, 3.55, 2.1,  130, 3.5, 11.8,  65],
    [3.2,  9.2, 3.5,  2.1,  140, 2.5, 11.0,  50],
    [3.2,  9.5, 3.4,  2.15, 115, 2.8, 11.9,  50],
    [3.9,  9.0, 3.1,  2.0,   80, 2.2, 13.0,  50],
    [3.1,  9.5, 3.6,  2.1,   90, 2.7, 11.1,  70],
    [3.2,  9.7, 3.45, 2.15, 130, 4.6, 10.85, 70]])
output_data = np.array([[2.24], [2.33], [2.24], [2.32], [2.2], [2.27], [2.2],
                        [2.26], [2.2], [2.24], [2.24], [2.2], [2.2], [2.35]])

hidden_num1 = 2
hidden_num2 = 1
a = net(sample_data, output_data, hidden_num1, hidden_num2)
predict_data = np.array([3.0, 9.3, 3.3, 2.05, 100, 2.8, 11.2, 50])
y = a.predict(predict_data)
print("Predicted high-jump result:", y[0][0])
```

Running the script prints the predicted high-jump result for athlete 15. (The original post showed a screenshot of the console output here.)
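As a quick sanity check (an addition of mine, not part of the original post), the trained network can be run back over the 14 training samples to see how closely it reproduces the known results:

```python
# Compare predictions on the training samples with the recorded results.
for i in range(len(sample_data)):
    pred = a.predict(sample_data[i])[0][0]
    print(f"athlete {i + 1:2d}: predicted {pred:.3f} m, actual {output_data[i][0]:.2f} m")
```

Because each sample is fitted until its own error drops below `error`, later samples can partially overwrite what earlier ones taught the network, so the training-set fit itself is worth inspecting.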

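One caveat worth flagging: the inputs are fed to the first sigmoid layer un-normalized, so large-magnitude columns (the squat loads of 80-150) push those hidden units into saturation. Below is a minimal sketch of a fix, assuming the same `net` class and variables defined above: min-max normalize each input column before training and prediction.

```python
# Min-max normalize each input column to [0, 1]; without this, features
# such as the squat load (80-150) saturate the sigmoid hidden units.
col_min = sample_data.min(axis=0)
col_max = sample_data.max(axis=0)
norm_samples = (sample_data - col_min) / (col_max - col_min)
norm_query = (predict_data - col_min) / (col_max - col_min)

b = net(norm_samples, output_data, hidden_num1, hidden_num2)
print("Prediction with normalized inputs:", b.predict(norm_query)[0][0])
```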