First Neural Network Code

--------------------------------------------------MAIN--------------------------------------------------------

from xiuwenapi import *

import matplotlib.pyplot as plt

import pylab

 

input_nodes = 784      # number of input values (28x28 pixels per image)
hidden_nodes = 200     # number of hidden-layer nodes
output_nodes = 10      # one output node per digit 0-9
learning_rate = 0.2

 

n = neuralnetwork(input_nodes,hidden_nodes,output_nodes,learning_rate)

#------------------------------- training phase ↓
training_data_file = open("E:\\MNIST\\MNIST_CSV格式\\mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()  # read in the CSV training data
training_data_file.close()
epochs = 1

for x in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01  # rescale pixel values into 0.01..1.00
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
        pass
    print(x)
    pass
#------------------------------- training phase ↑

 

#torch.save(n, "E:\\MNIST\\MNIST_CSV格式\\pzwj.pkl")
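The commented-out torch.save call above would pickle the whole network object. Since the trained state is really just the two weight matrices, a lighter alternative is to store them with numpy; this is only a sketch, and the .npy file names are made up, not part of the original project:

```python
# save the trained weight matrices (hypothetical file names)
numpy.save("E:\\MNIST\\wih.npy", n.wih)
numpy.save("E:\\MNIST\\who.npy", n.who)

# later: rebuild a network with the same shape and load the weights back
# m = neuralnetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# m.wih = numpy.load("E:\\MNIST\\wih.npy")
# m.who = numpy.load("E:\\MNIST\\who.npy")
```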

 

 

#---------------------------------- testing phase ↓
print("train OVER\r\n ")
test_data_file = open("E:\\MNIST\\MNIST_CSV格式\\mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    label = numpy.argmax(outputs)
    if label == correct_label:
        scorecard.append(1)
    else:
        scorecard.append(0)
        pass
    pass
#---------------------------------- testing phase ↑

scorecard_array = numpy.asarray(scorecard)
print("performance = ", scorecard_array.sum() / scorecard_array.size)
print("test OVER\r\n ")

 

-------------------------------------------------------------------API---------------------------------------------------------------------

 

# first attempt, 2019_9_25

import numpy

import scipy.special

class qz:
    # simple holder class: neuralnetwork.train() stores the latest weights here as class attributes
    wih = None
    who = None

 

class neuralnetwork:
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate  # learning rate
        # initial weights drawn from a zero-centred normal distribution
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        self.activation_function = lambda x: scipy.special.expit(x)  # sigmoid activation

 

    def train(self, inputs_list, targets_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T

        # forward pass
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)

        # backpropagate the errors
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)

        # gradient-descent weight updates
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))

        # expose the latest weights through the qz holder class
        qz.who = self.who
        qz.wih = self.wih

 

    def query(self, inputs_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_inputs = numpy.dot(self.wih, inputs)                # weights dotted with the inputs
        hidden_outputs = self.activation_function(hidden_inputs)   # sigmoid activation
        final_inputs = numpy.dot(self.who, hidden_outputs)         # weights dotted with the hidden layer
        final_outputs = self.activation_function(final_inputs)     # sigmoid activation
        return final_outputs
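As a note on what train() is doing: because the activation is the sigmoid, its derivative is output × (1 − output), and the two in-place updates are one gradient-descent step on the output error. Written out (a sketch of the same computation, with α the learning rate, ⊙ the element-wise product, T the target vector, I the input vector, and O_h, O_f the hidden and final outputs):

$$E_o = T - O_f, \qquad E_h = W_{ho}^{\mathsf{T}} E_o$$

$$W_{ho} \leftarrow W_{ho} + \alpha \,\bigl(E_o \odot O_f \odot (1 - O_f)\bigr)\, O_h^{\mathsf{T}}, \qquad W_{ih} \leftarrow W_{ih} + \alpha \,\bigl(E_h \odot O_h \odot (1 - O_h)\bigr)\, I^{\mathsf{T}}$$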

 

Defining a convolutional neural network (CNN) usually involves a deep-learning framework such as TensorFlow, PyTorch or Keras. Using Python with Keras as an example, a simple CNN model can be defined like this:

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

# create a Sequential model
model = Sequential()

# first convolutional layer
# adjust height, width and channels to the actual image size
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(height, width, channels)))

# pooling layer to shrink the feature maps
model.add(MaxPooling2D(pool_size=(2, 2)))

# further convolution and pooling layers can be added, e.g. a second convolutional layer
# model.add(Conv2D(...))
# model.add(MaxPooling2D(...))

# flatten the convolutional output into a 1-D vector for the fully connected layers
model.add(Flatten())

# fully connected layers for classification
model.add(Dense(units=64, activation='relu'))
model.add(Dense(units=num_classes, activation='softmax'))  # num_classes is the number of classes in your problem

# compile the model with a loss function, an optimizer and evaluation metrics
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# after this you can call fit() to train the model
```

This is only the basic skeleton; the details can be adjusted to the task, for example adding dropout to prevent overfitting or tuning the parameters. Each part has its own role: the convolutional layers extract features, the pooling layers reduce dimensionality, and the fully connected layers make the classification decision.
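To connect the Keras skeleton above with the MNIST CSV data used earlier, a hedged sketch of how training could be started is given below. The reshape to (28, 28, 1), the 10 classes, and the fit() hyper-parameters are illustrative assumptions, not part of the original post:

```python
import numpy
from tensorflow.keras.utils import to_categorical

# reuse the CSV lines read into training_data_list earlier
records = [line.split(',') for line in training_data_list]
x_train = numpy.asfarray([r[1:] for r in records]).reshape((-1, 28, 28, 1)) / 255.0
y_train = to_categorical([int(r[0]) for r in records], num_classes=10)

# matches a model built with input_shape=(28, 28, 1) and num_classes=10
model.fit(x_train, y_train, epochs=5, batch_size=32, validation_split=0.1)
```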