Computing Convolution - numpy Implementation, Part 1

flyfish

First, determine which kind of multiplication each operator performs.

For the array type:

# @ = np.dot = np.matmul = matrix multiplication
# np.multiply = * = element-wise multiplication

For the matrix type:

# @ = np.dot = np.matmul = * = matrix multiplication
# np.multiply = element-wise multiplication (the only element-wise one)

The verification code is as follows:

```python
import numpy as np

# array
# @ = np.dot = np.matmul = matrix multiplication
# np.multiply = * = element-wise multiplication
a = np.array([[1, 2], [5, 6]])
b = np.array([[1, 2], [3, 4]])

r0 = a @ b  # matrix multiplication
print('r0: %s' % (r0))

r1 = np.dot(a, b)  # matrix multiplication
print('r1: %s' % (r1))

r2 = np.matmul(a, b)  # matrix multiplication
print('r2: %s' % (r2))

r3 = np.multiply(a, b)  # element-wise multiplication
print('r3: %s' % (r3))

r4 = a * b  # element-wise multiplication
print('r4: %s' % (r4))

# matrix
# @ = np.dot = np.matmul = * = matrix multiplication
# np.multiply = element-wise multiplication (the only element-wise one)
a = np.matrix([[1, 2], [5, 6]])
b = np.matrix([[1, 2], [3, 4]])

r0 = a @ b  # matrix multiplication
print('r0: %s' % (r0))

r1 = np.dot(a, b)  # matrix multiplication
print('r1: %s' % (r1))

r2 = np.matmul(a, b)  # matrix multiplication
print('r2: %s' % (r2))

r3 = np.multiply(a, b)  # element-wise multiplication (the only element-wise one)
print('r3: %s' % (r3))

r4 = a * b  # matrix multiplication
print('r4: %s' % (r4))
```

Output for array:

```
r0: [[ 7 10]
 [23 34]]
r1: [[ 7 10]
 [23 34]]
r2: [[ 7 10]
 [23 34]]
r3: [[ 1  4]
 [15 24]]
r4: [[ 1  4]
 [15 24]]
```

Output for matrix:

```
r0: [[ 7 10]
 [23 34]]
r1: [[ 7 10]
 [23 34]]
r2: [[ 7 10]
 [23 34]]
r3: [[ 1  4]
 [15 24]]
r4: [[ 7 10]
 [23 34]]
```
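One caveat worth adding (standard numpy behavior, not covered by the verification above): the equivalence @ = np.dot = np.matmul holds for the 2-D arrays used here, but the two functions diverge on 3-D and higher inputs, where np.matmul broadcasts over the leading batch axis while np.dot takes a sum-product over the last axis of the first argument and the second-to-last axis of the second:

```python
import numpy as np

# For 2-D arrays, @ / np.dot / np.matmul all agree.
# For stacked (3-D) inputs they differ:
a = np.ones((2, 3, 4))
b = np.ones((2, 4, 5))
print(np.matmul(a, b).shape)  # (2, 3, 5)    -- batched matrix multiply
print(np.dot(a, b).shape)     # (2, 3, 2, 5) -- crosses every batch of a with every batch of b
```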

The later parts of this series will use the array type together with element-wise multiplication, i.e. * or np.multiply. (As a side note, the numpy documentation no longer recommends np.matrix, which is another reason to stick with array.)
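As a preview of how that choice is used, here is a minimal sketch (my own illustration, not code from this series; conv2d_valid is a hypothetical helper name) of how element-wise multiplication plus a sum produces each output element of a 2-D "valid" convolution, i.e. the cross-correlation variant commonly used in CNNs:

```python
import numpy as np

def conv2d_valid(x, k):
    """Slide kernel k over x; each output element is an
    element-wise product (* / np.multiply) followed by a sum."""
    kh, kw = k.shape
    oh = x.shape[0] - kh + 1
    ow = x.shape[1] - kw + 1
    out = np.zeros((oh, ow))
    for i in range(oh):
        for j in range(ow):
            # element-wise multiply the window under the kernel, then sum
            out[i, j] = np.sum(x[i:i+kh, j:j+kw] * k)
    return out

x = np.arange(16.0).reshape(4, 4)
k = np.array([[1.0, 0.0], [0.0, -1.0]])
print(conv2d_valid(x, k))  # 3x3 output for a 4x4 input and 2x2 kernel
```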
