10.23

  1. Mean and std

numpy.flatten() vs numpy.ravel()
When preprocessing images before feeding them into a neural network, normalizing them usually improves the final result. For an RGB image, each channel is normalized separately, which requires the mean and standard deviation of every channel. To collect these statistics, read each image with cv2 (channel order BGR) or PIL (channel order RGB) into a 3-D array of shape (height, width, channels), gather all of these arrays into a single array, then iterate over the channel dimension, flatten each channel into a 1-D array, and compute its mean and std.
Flattening a high-dimensional array into a 1-D array uses numpy.flatten() or numpy.ravel(). numpy.flatten() returns a copy of the original array, so modifying the copy does not affect the original,
whereas numpy.ravel() returns a view of the original array, so modifying the view does change the original. Choose between the two according to the situation.
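A minimal sketch of the copy-vs-view difference (the array contents here are just for illustration):

import numpy as np

a = np.arange(6).reshape(2, 3)

flat = a.flatten()   # copy: writes to it never touch a
flat[0] = 100
print(a[0, 0])       # still 0

rav = a.ravel()      # view (for a contiguous array): writes propagate back to a
rav[0] = 100
print(a[0, 0])       # now 100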

import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm

data = []
df_train = pd.read_csv('./data/train.csv')

for file in tqdm(df_train['image_name'], miniters=256):
    img = cv2.imread('./data/train-jpg/{}.jpg'.format(file))
    # Resize so every image has the same shape; otherwise converting the list with np.array() fails
    data.append(cv2.resize(img, (224, 224)))
data = np.array(data, np.float32) / 255  # must use at least float32, otherwise values exceed float16 limits
print("Shape: ", data.shape)

means = []
stdevs = []
for i in range(3):
    pixels = data[:, :, :, i].ravel()
    means.append(np.mean(pixels))
    stdevs.append(np.std(pixels))

print("means: {}".format(means))
print("stdevs: {}".format(stdevs))
print('transforms.Normalize(mean = {}, std = {})'.format(means, stdevs))

transforms.Normalize(mean = [0.39832273, 0.48429126, 0.4609184], std = [0.21070659, 0.16901231, 0.19081236]) (BGR order, since the images were read with cv2)
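For reference, a sketch of how these values might be plugged into a torchvision preprocessing pipeline. torchvision transforms operate on RGB (PIL-order) images, so the cv2-derived BGR statistics are reversed first; the pipeline itself is an assumption, not part of the original script:

import torchvision.transforms as transforms

# BGR statistics from the script above, reversed to RGB for PIL/torchvision input
mean_rgb = [0.4609184, 0.48429126, 0.39832273]
std_rgb = [0.19081236, 0.16901231, 0.21070659]

preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),                          # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=mean_rgb, std=std_rgb),
])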

2. tqdm
from tqdm import tqdm_notebook

for fileName in tqdm_notebook(trainDataFram['image_id'], miniters=256):  # miniters: refresh the progress bar at most once every 256 items
    pass  # do something else with each fileName

This gives real-time progress monitoring in both Jupyter (tqdm_notebook) and the terminal (tqdm).
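A minimal self-contained sketch (the file list is made up) showing the terminal variant; in a notebook, swapping tqdm for tqdm_notebook gives the same behaviour with an HTML progress bar:

import time
from tqdm import tqdm

file_names = ['img_{:04d}.jpg'.format(i) for i in range(1000)]
for name in tqdm(file_names, miniters=256):   # bar refreshes at most once per 256 items
    time.sleep(0.001)                         # stand-in for real per-file work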

3. Using tensorboardX to monitor training
import os

import torch
from torch.autograd import Variable
from tensorboardX import SummaryWriter

# batch_size and train_data are assumed to be defined at module level
def train_model(model, modelName, dataLoader, criterion, optimizer, scheduler,
                num_epochs=2, paramPath=None, cudaDevice='3', batchsize=batch_size):
    writer = SummaryWriter('log')  # event files go to ./log
    os.environ["CUDA_VISIBLE_DEVICES"] = cudaDevice
    model.cuda()
    if paramPath is not None:
        model.load_state_dict(torch.load(paramPath))
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        scheduler.step()
        model.train(True)  # set model to training mode
        running_loss = 0.0
        running_corrects = 0
        for batch_idx, (inputs, labels) in enumerate(dataLoader):
            inputs = Variable(inputs.cuda())
            labels = Variable(labels.cuda())
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            if isinstance(outputs, tuple):  # e.g. a network that returns (main_output, aux_output)
                loss = sum(criterion(o, labels) for o in outputs)
            else:
                loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            running_corrects += torch.sum(preds == labels.data)
            if batch_idx % 10 == 9:
                print('[epoch:%d,batch:%d]: correct num:%d, total correct:%d, total num:%d, acc: %f'
                      % (epoch, batch_idx, torch.sum(preds == labels.data), running_corrects,
                         len(train_data), float(running_corrects) / (batchsize * (batch_idx + 1))))
                niter = epoch * len(train_data) + batch_idx
                writer.add_scalar('Train/Acc', float(running_corrects) / (batchsize * (batch_idx + 1)), niter)
                writer.add_scalar('Train/Loss', running_loss / (batchsize * (batch_idx + 1)), niter)
        epoch_loss = running_loss / len(train_data)
        epoch_acc = float(running_corrects) / len(train_data)
        torch.save(model.state_dict(), '../model/' + modelName + '_' + str(epoch) + '.pkl')
        print(' Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
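A hedged sketch of how train_model might be called; the model, loader, and hyper-parameters below are assumptions for illustration, not part of the original post. The logged scalars can then be inspected by running tensorboard --logdir log.

import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import models

batch_size = 32
num_classes = 10                                   # assumed number of classes for the task
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)  # train_data: any labelled Dataset

model = models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, num_classes)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

train_model(model, 'resnet18', train_loader, criterion, optimizer, scheduler,
            num_epochs=2, cudaDevice='0')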