PyTorch CPU/GPU Timing Test

References

https://zhuanlan.zhihu.com/p/35434175

https://www.kaggle.com/scottclowe/testing-gpu-enabled-notebooks-mnist-pytorch
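
Before running the benchmark, it is worth confirming that PyTorch can actually see a CUDA device. A minimal check (independent of the script below) could look like this:

import torch

# Report whether a CUDA-capable GPU is visible to PyTorch, and which one.
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("Device name:", torch.cuda.get_device_name(0))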

#%%time 
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import time
import pandas as pd 
from IPython.display import display
import seaborn as sns 
# Simple fully connected network: Linear -> ReLU -> Linear
class Net(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.l1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out = self.fc1(x)
        out = self.l1(out)
        out = self.fc2(out)
        return out

####################################
use_gpu_sta=[True,False]
n_sample_size=[5000,10000,20000,30000,40000,50000,60000,80000,100000]
time_eva=pd.DataFrame(columns=n_sample_size,index=["GPU","CPU"])
####################################
# For each device setting and sample size: generate random data,
# train the model for a fixed number of epochs, and record the wall-clock time.
for use_gpu in use_gpu_sta:
    for n_sample in n_sample_size:
        start_time=time.time()
        #print("Start time = "+time.ctime())
        inp = np.random.randn(n_sample,1000)
        oup = np.random.randn(n_sample,32)

        inp = inp.astype(np.float32)
        oup = oup.astype(np.float32)
        # Hyper Parameters
        input_size = inp.shape[1]
        hidden_size = 256
        output_size = 32
        num_epochs = 50
        learning_rate = 0.01

        # Toy Dataset
        x_train = inp
        y_train = oup
        model = Net(input_size, hidden_size, output_size)
        #print(model)
        # Loss and Optimizer
        criterion = nn.MSELoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  

        ###### GPU
        if use_gpu:
            #print("We are using GPU now!!!")
            model = model.cuda()
        else:
            #print("We are using CPU now")
            device="cpu"
            model=model.to(device)

        # Train the Model 
        for epoch in range(num_epochs):
            # Convert numpy arrays to torch tensors (Variable is deprecated since PyTorch 0.4)
            if use_gpu:
                inputs  = torch.from_numpy(x_train).cuda()
                targets = torch.from_numpy(y_train).cuda()
            else:
                inputs  = torch.from_numpy(x_train)
                targets = torch.from_numpy(y_train)
            # Forward + Backward + Optimize
            optimizer.zero_grad()  
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

#         if (epoch+1) % 5 == 0:
#             print ('Epoch [%d/%d], Loss: %.4f' 
#                    %(epoch+1, num_epochs, loss.item()))

        # record elapsed time for this configuration
        if use_gpu:
            # CUDA kernels run asynchronously; wait for them to finish before
            # reading the clock, otherwise the GPU time would be underestimated.
            torch.cuda.synchronize()
            end_time=time.time()
            cost_time=end_time-start_time
            print("n_sample={}, GPU cost time: {}".format(n_sample,cost_time))
            time_eva.loc[["GPU"],[n_sample]]=cost_time
        else:
            end_time=time.time()
            cost_time=end_time-start_time
            print("n_sample={}, CPU cost time: {}".format(n_sample,cost_time))
            time_eva.loc[["CPU"],[n_sample]]=cost_time
        print("======================================")
        
display(time_eva)
time_plot=time_eva.T
time_plot["n_sample"]=np.array(n_sample_size)
print(time_plot)

time_plot.plot(x="n_sample", kind="bar")
plt.title("Running Time (s)")
plt.show()
# # Plot the graph
# if use_gpu:
#     predicted = model(torch.from_numpy(x_train).cuda()).data.cpu().numpy()
# else:
#     predicted = model(torch.from_numpy(x_train)).data.numpy()
# plt.plot( y_train/500, 'r-', label='Original data')
# plt.plot( predicted/500,'-', label='Fitted line')
# #plt.plot(y_train/500, predicted/500,'.', label='Fitted line')
# plt.legend()
# plt.show()
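
For timing just the GPU work (rather than the whole loop, data generation included), one alternative not used in the script above is CUDA events. A rough sketch, assuming model, inputs, targets, criterion and optimizer already exist on the GPU as in the loop above:

# Sketch: time a single training step with CUDA events (GPU-side timing).
# Assumes model, inputs, targets, criterion, optimizer are already on the GPU.
start_evt = torch.cuda.Event(enable_timing=True)
end_evt   = torch.cuda.Event(enable_timing=True)

start_evt.record()
optimizer.zero_grad()
loss = criterion(model(inputs), targets)
loss.backward()
optimizer.step()
end_evt.record()

torch.cuda.synchronize()   # make sure both events have completed
print("One training step took {:.3f} ms".format(start_evt.elapsed_time(end_evt)))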

The final results are shown below:
[Figures: running-time table and CPU vs GPU bar chart]
