References
https://zhuanlan.zhihu.com/p/35434175
https://www.kaggle.com/scottclowe/testing-gpu-enabled-notebooks-mnist-pytorch
#%%time
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from IPython.display import display
# A simple two-layer fully connected network (Linear -> ReLU -> Linear)
class Net(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
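# Illustrative sanity check (not from the original notebook): run one forward
# pass through Net with random data to confirm the expected output shape.
_probe = Net(1000, 256, 32)
print(_probe(torch.randn(4, 1000)).shape)  # torch.Size([4, 32])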
####################################
use_gpu_sta = [True, False]
n_sample_size = [5000, 10000, 20000, 30000, 40000, 50000, 60000, 80000, 100000]
# One row per device, one column per sample size
time_eva = pd.DataFrame(columns=n_sample_size, index=["GPU", "CPU"])
####################################
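# Guard (not in the original notebook): skip the GPU pass when CUDA is
# unavailable, so the benchmark still runs on a CPU-only machine.
if not torch.cuda.is_available():
    use_gpu_sta = [False]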
# Benchmark: train the same model on GPU and on CPU for each sample size
for use_gpu in use_gpu_sta:
    for n_sample in n_sample_size:
        start_time = time.time()
        # Generate a random toy dataset: n_sample rows, 1000 features -> 32 targets
        inp = np.random.randn(n_sample, 1000).astype(np.float32)
        oup = np.random.randn(n_sample, 32).astype(np.float32)
        # Hyper-parameters
        input_size = inp.shape[1]
        hidden_size = 256
        output_size = 32
        num_epochs = 50
        learning_rate = 0.01
        # Toy dataset
        x_train = inp
        y_train = oup
        model = Net(input_size, hidden_size, output_size)
        # Loss and optimizer
        criterion = nn.MSELoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
        # Move the model to the chosen device
        device = "cuda" if use_gpu else "cpu"
        model = model.to(device)
        # Train the model
        for epoch in range(num_epochs):
            # The numpy -> tensor conversion and the host-to-device copy are
            # repeated every epoch, so they are included in the measured time
            inputs = torch.from_numpy(x_train).to(device)
            targets = torch.from_numpy(y_train).to(device)
            # Forward + backward + optimize
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            # if (epoch + 1) % 5 == 0:
            #     print('Epoch [%d/%d], Loss: %.4f' % (epoch + 1, num_epochs, loss.item()))
        # Record the elapsed wall-clock time for this run
        cost_time = time.time() - start_time
        device_label = "GPU" if use_gpu else "CPU"
        print("n_sample={}, {} cost time: {:.3f}s".format(n_sample, device_label, cost_time))
        time_eva.loc[device_label, n_sample] = cost_time
    print("======================================")
display(time_eva)
time_plot = time_eva.T
time_plot["n_sample"] = np.array(n_sample_size)
print(time_plot)
time_plot.plot(x="n_sample", kind="bar")
plt.title("Running time (s)")
plt.show()
# # Plot predictions against the original data
# predicted = model(torch.from_numpy(x_train).to(device)).detach().cpu().numpy()
# plt.plot(y_train / 500, 'r-', label='Original data')
# plt.plot(predicted / 500, '-', label='Fitted line')
# # plt.plot(y_train / 500, predicted / 500, '.', label='Fitted line')
# plt.legend()
# plt.show()
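One caveat on the timing above: CUDA kernels execute asynchronously, so time.time() read right after the last optimizer.step() can return before the GPU has actually finished its queued work. A more defensive measurement calls torch.cuda.synchronize() before reading the clock on both ends. A minimal sketch, assuming the same model, inputs, targets, criterion, and optimizer as above (the timed_training_run helper is illustrative, not part of the original notebook):

def timed_training_run(model, inputs, targets, criterion, optimizer, num_epochs):
    # Illustrative helper: same training loop as above, but with explicit
    # synchronization so the measured time covers all queued GPU work.
    if inputs.is_cuda:
        torch.cuda.synchronize()  # drain any pending kernels before timing
    start = time.time()
    for _ in range(num_epochs):
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
    if inputs.is_cuda:
        torch.cuda.synchronize()  # wait for the last kernels to finish
    return time.time() - start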
The final results are as follows: