# Import libraries
import torch
import numpy as np
from torch. utils. data import DataLoader
from torchvision import datasets
from torchvision import transforms
import matplotlib. pyplot as plt
# Example 1: Conv2d input/output/weight shapes
# A random (N, C, H, W) batch through a 3x3 conv: 100x100 spatial size
# shrinks to 98x98 (no padding), channels go 5 -> 10.
in_channels, out_channels = 5, 10
width, height = 100, 100
kernel_size = 3
batch_size = 1

input = torch.randn(batch_size, in_channels, width, height)
Conv_Layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size)
output = Conv_Layer(input)

print(input.shape)           # torch.Size([1, 5, 100, 100])
print(output.shape)          # torch.Size([1, 10, 98, 98])
print(Conv_Layer.weight.shape)  # (out_channels, in_channels, kH, kW)
# Example 2: convolution with a hand-set kernel
# A 5x5 image convolved with a fixed 3x3 kernel (values 1..9), padding=1
# and no bias, so the spatial size stays 5x5 and the result is exactly
# the cross-correlation of the two grids.
values = [3, 4, 6, 5, 7,
          2, 4, 6, 8, 2,
          1, 6, 7, 8, 4,
          9, 7, 4, 6, 2,
          3, 7, 5, 4, 1]
input = torch.tensor(values, dtype=torch.float32).view(1, 1, 5, 5)

Conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
kernel = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=torch.float32).view(1, 1, 3, 3)
Conv_layer.weight.data = kernel.data  # overwrite the randomly initialized weight

output = Conv_layer(input)
print(output)
# Example 3: MaxPooling
# 2x2 max pooling (stride defaults to the kernel size) halves each
# spatial dimension: 4x4 -> 2x2, keeping the max of each 2x2 tile.
pool_values = [3, 4, 5, 6,
               2, 4, 6, 8,
               1, 6, 7, 8,
               9, 7, 4, 6]
input = torch.tensor(pool_values, dtype=torch.float32).view(1, 1, 4, 4)
maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)
output = maxpooling_layer(input)
print(output)
# Build a simple CNN
# MNIST pipeline: to-tensor conversion plus normalization with the
# dataset's standard mean/std (0.1307 / 0.3081), wrapped in DataLoaders.
# download=False assumes the files are already under ./datasets/mnist.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
tra_data = datasets.MNIST(root="./datasets/mnist", train=True, download=False, transform=transform)
test_data = datasets.MNIST(root="./datasets/mnist", train=False, download=False, transform=transform)
tra_loader = DataLoader(dataset=tra_data, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=False)
class Net(torch.nn.Module):
    """Small CNN classifier: two conv+ReLU+maxpool stages, then a linear layer.

    Takes (N, 1, 28, 28) images and returns (N, 10) raw class scores
    (no softmax here — the training loop pairs this with CrossEntropyLoss).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.linear = torch.nn.Linear(320, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        # Spatial sizes: conv1 28->24, pool ->12; conv2 12->8, pool ->4.
        # Flattened: 20 channels * 4 * 4 = 320 features per sample.
        x = self.conv1(x)
        x = self.relu(x)
        x = self.pooling(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.pooling(x)
        x = x.view(x.size(0), -1)
        return self.linear(x)
# Pick GPU when available, move model and loss there, and use plain
# SGD with momentum.
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
    """Run one training epoch over tra_loader.

    Prints the running average loss every 300 mini-batches (epoch and
    batch index are printed 1-based). Uses the module-level model,
    criterion, optimizer, and device.
    """
    running_loss = 0.0
    # Unpack the (inputs, targets) pair directly instead of via a `data` temp.
    for i, (inputs, targets) in enumerate(tra_loader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        y_pred = model(inputs)
        # `loss` instead of the ambiguous single-letter `l` (PEP 8 E741).
        loss = criterion(y_pred, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 300 == 299:
            print("[%d %5d]\tloss: %3f" % (epoch + 1, i + 1, running_loss / 300))
            running_loss = 0.0
def test():
    """Evaluate the model on test_loader and return accuracy as a percentage.

    Also prints the accuracy with the raw correct/total counts. Uses the
    module-level model, test_loader, and device.
    """
    total = 0
    correct = 0
    with torch.no_grad():  # inference only — no gradient bookkeeping needed
        for x, labels in test_loader:
            x, labels = x.to(device), labels.to(device)
            outputs = model(x)
            total += labels.size(0)
            # argmax over the class dimension; dropped the deprecated
            # `.data` access (redundant inside torch.no_grad()).
            _, predicted = torch.max(outputs, dim=1)
            correct += (predicted == labels).sum().item()
    # (Removed the dead `accuracy = 0.0` pre-initialization.)
    print("Accuracy on Test is: %3f %% [%d %d]" % (100 * correct / total, correct, total))
    accuracy = 100 * correct / total
    return accuracy
if __name__ == '__main__':
    # Train for 10 epochs, recording test accuracy after each, then plot.
    accuracies = []
    for epoch in range(10):
        train(epoch)
        accuracies.append(test())
    acc_list = np.array(accuracies)
    plt.plot(range(10), acc_list)
    plt.xlabel('epoch')
    plt.ylabel('Accuracy')
    plt.show()
    plt.close()