import random
import threading
import math
class NeuralNetwork:
    """A minimal fully-connected network with one sigmoid hidden layer.

    Weights are plain nested lists; ``train`` performs one step of
    stochastic gradient descent via backpropagation. Weight/bias updates
    are guarded by a lock so concurrent trainers do not interleave
    partial updates.
    """

    def __init__(self, input_size, hidden_size, output_size, learning_rate=0.1):
        """Initialize weights and biases uniformly in [-1, 1].

        Args:
            input_size: number of input features.
            hidden_size: number of hidden units.
            output_size: number of output units.
            learning_rate: SGD step size used by ``train``.
        """
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = learning_rate
        self.weights_ih = [[random.uniform(-1, 1) for _ in range(hidden_size)]
                           for _ in range(input_size)]
        self.weights_ho = [[random.uniform(-1, 1) for _ in range(output_size)]
                           for _ in range(hidden_size)]
        self.biases_h = [random.uniform(-1, 1) for _ in range(hidden_size)]
        self.biases_o = [random.uniform(-1, 1) for _ in range(output_size)]
        # Lock to avoid concurrent writers corrupting the weight updates.
        self.lock = threading.Lock()

    def _forward(self, inputs):
        """Run one forward pass; return (hidden_activations, output_activations).

        Fix vs. original: the biases are now actually added to each layer's
        pre-activation (they were initialized but never used).
        """
        hidden = []
        for j in range(self.hidden_size):
            total = self.biases_h[j]
            for i in range(self.input_size):
                total += inputs[i] * self.weights_ih[i][j]
            hidden.append(self.sigmoid(total))
        outputs = []
        for k in range(self.output_size):
            total = self.biases_o[k]
            for j in range(self.hidden_size):
                total += hidden[j] * self.weights_ho[j][k]
            outputs.append(self.sigmoid(total))
        return hidden, outputs

    def feedforward(self, inputs):
        """Propagate *inputs* through the network and return the output layer."""
        _, outputs = self._forward(inputs)
        return outputs

    def train(self, inputs, targets):
        """Perform one backpropagation / gradient-descent step.

        Fixes vs. original:
        - ``errors[j]`` was indexed with a hidden-layer index even though
          ``errors`` has ``output_size`` entries -> IndexError whenever
          hidden_size > output_size. Hidden-layer deltas are now properly
          backpropagated through ``weights_ho``.
        - Updates are scaled by the learning rate and by the corresponding
          activations; biases are trained too.
        - The lock is held via ``with`` so it is released even on error.
        """
        hidden, outputs = self._forward(inputs)
        # Output-layer deltas: (target - output) * sigmoid'(output).
        output_deltas = [
            (targets[k] - outputs[k]) * self.sigmoid_derivative(outputs[k])
            for k in range(self.output_size)
        ]
        # Hidden-layer deltas: backpropagate output deltas through weights_ho.
        hidden_deltas = []
        for j in range(self.hidden_size):
            err = sum(output_deltas[k] * self.weights_ho[j][k]
                      for k in range(self.output_size))
            hidden_deltas.append(err * self.sigmoid_derivative(hidden[j]))
        lr = self.learning_rate
        with self.lock:  # serialize updates across concurrent trainers
            for j in range(self.hidden_size):
                for k in range(self.output_size):
                    self.weights_ho[j][k] += lr * output_deltas[k] * hidden[j]
            for k in range(self.output_size):
                self.biases_o[k] += lr * output_deltas[k]
            for i in range(self.input_size):
                for j in range(self.hidden_size):
                    self.weights_ih[i][j] += lr * hidden_deltas[j] * inputs[i]
            for j in range(self.hidden_size):
                self.biases_h[j] += lr * hidden_deltas[j]

    def sigmoid(self, x):
        """Numerically stable logistic function.

        Fix vs. original: ``math.exp(-x)`` overflows for x < ~-709; branch
        on the sign so the exponent argument is never large and positive.
        """
        if x >= 0:
            return 1 / (1 + math.exp(-x))
        e = math.exp(x)
        return e / (1 + e)

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid, expressed in terms of its OUTPUT x."""
        return x * (1 - x)
# Usage example
def main():
    """Demo: build a small network and run a single forward pass."""
    input_size = 2
    hidden_size = 3
    output_size = 1
    neural_network = NeuralNetwork(input_size, hidden_size, output_size)
    inputs = [random.uniform(0, 1) for _ in range(input_size)]
    outputs = neural_network.feedforward(inputs)
    print(outputs)


# Guard so importing this module does not run the demo as a side effect.
if __name__ == "__main__":
    main()