逐行统计python代码执行时间
在炼丹或者 debug 的过程中,我们经常需要逐行统计代码的运行时间。使用 time.time() 手动打点虽然可以解决问题,但过于繁琐。经过一番踩坑,推荐使用 line_profiler 库,安装方式如下,十分简单。
pip install line_profiler
安装成功,下面开始介绍如何使用。
from __future__ import print_function

import argparse
import time

import numpy as np
import sklearn.metrics as metrics
import torch
import torch.nn as nn
import torch.optim as optim
from line_profiler import LineProfiler
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from torch.utils.data import DataLoader

from data import ModelNet_Loader
from model import PointNet
from util import cal_loss, IOStream
def train(args, io):
    """Train a point-cloud classifier on ModelNet40 and log per-epoch stats.

    Args:
        args: parsed command-line namespace; reads num_points, batch_size,
            test_batch_size, cuda, model, use_sgd, lr, momentum, scheduler,
            epochs.
        io: run logger (util.IOStream) — assumed to expose cprint(); confirm
            against util.py.
    """
    # NOTE(review): the original called ModelNet40(), a name this file never
    # imports; use the imported ModelNet_Loader instead — confirm against data.py.
    train_loader = DataLoader(
        ModelNet_Loader(args, partition='train', num_points=args.num_points),
        batch_size=args.batch_size, shuffle=True, drop_last=False)
    test_loader = DataLoader(
        ModelNet_Loader(args, partition='test', num_points=args.num_points),
        num_workers=8, batch_size=args.test_batch_size,
        shuffle=False, drop_last=False)

    device = torch.device("cuda:0" if args.cuda else "cpu")

    # Try to load models. Fail loudly on an unsupported choice instead of
    # hitting a NameError on the unbound `model` below.
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    if args.use_sgd:
        print("Use SGD")
        # lr is scaled x100 for SGD (standard DGCNN-style recipe).
        opt = optim.SGD(model.parameters(), lr=args.lr * 100,
                        momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    if args.scheduler == 'cos':
        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)
    elif args.scheduler == 'step':
        scheduler = StepLR(opt, step_size=20, gamma=0.7)

    criterion = cal_loss
    best_test_acc = 0.85

    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        time_epoch = time.time()
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            # (B, N, 3) -> (B, 3, N): channels-first layout for the model.
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            # Accumulate labels/predictions for the epoch accuracy below
            # (the original initialized these lists but never filled them).
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())

        # Step the LR schedule once per epoch (the original created the
        # scheduler but never stepped it).
        scheduler.step()
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        io.cprint('Train %d, loss: %.6f, acc: %.6f, time: %.2fs'
                  % (epoch, train_loss * 1.0 / count,
                     np.mean(train_true == train_pred),
                     time.time() - time_epoch))
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(description='Point Cloud Recognition')
parser.add_argument('--exp_name', type=str, default='exp_dgcnn_modelnet_2048_time', metavar='N',help='Name of the experiment')
parser.add_argument('--model', type=str, default='dgcnn', metavar='N',choices=['pointnet', 'dgcnn','pointcnn'],help='Model to use, [pointnet, dgcnn]')
parser.add_argument('--dataset', type=str, default='modelnet40', metavar='N')
parser.add_argument('--batch_size', type=int, default=8, metavar='batch_size',help='Size of batch)')
parser.add_argument('--test_batch_size', type=int, default=4, metavar='batch_size',help='Size of batch)')
parser.add_argument('--epochs', type=int, default=2, metavar='N',help='number of episode to train ')
args = parser.parse_args()
lp = LineProfiler()
lp_wrapper = lp(train)
lp_wrapper(args, io)
lp.print_stats()
以上是简化后的代码,中间可能有缺失,但不影响介绍 line_profiler 的使用。
如下图所示,显示了每一行代码的运行时间,调用次数和占用百分比