Two ways to generate batches

Method 1: PyTorch's built-in DataLoader

import torch
import torch.utils.data as Data

# Wrap the training tensors in a dataset, then hand it to a DataLoader
torch_dataset = Data.TensorDataset(trainX, trainY)  # trainX, trainY are the training data and labels
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=128,  # number of samples per batch
    shuffle=True,    # reshuffle the data every epoch
)
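For reference, here is a minimal placeholder setup that makes the snippets in this section runnable end to end. The shapes, the linear model, the loss, and the hyperparameters below are illustrative assumptions, not from the original post (trainX and trainY would of course be defined before the TensorDataset above):

import torch
import torch.nn as nn

trainX = torch.randn(1000, 10)  # 1000 samples, 10 features (assumed shape)
trainY = torch.randn(1000, 1)   # matching regression targets (assumed)
model = nn.Linear(10, 1)        # placeholder model
loss_func = nn.MSELoss()        # placeholder loss
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
epochs = 500                    # assumed value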

print(" Start training... ")
for epoch in range(epochs):
    loss_sum = 0
    num = 0
    for step,(batch_x,batch_y) in enumerate(loader):
        y_pre = model(batch_x)
        #y_pre = y_pre.cpu()
        #batch_y = batch_y.type(torch.FloatTensor)
        loss = loss_func(y_pre, batch_y)
        loss_sum = loss_sum + loss
        num = num + 1
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if epoch % 100 == 0:
    		torch.save(model.state_dict(), str(epoch)+'model.pkl')
    loss_mean = loss_sum / num
    print('epoch:', epoch, '   loss:', loss_mean)
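To resume from one of the checkpoints saved above, load the state dict back into a model with the same architecture; the filename here just follows the str(epoch) + 'model.pkl' pattern from the loop:

# Restore a checkpoint saved during training (the filename is illustrative)
model.load_state_dict(torch.load('100model.pkl'))
model.eval()  # switch to evaluation mode before inference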

Method 2: a generator function with yield

def get_batches(self, inputs, targets, batch_size, shuffle=True):
    # inputs plays the role of X, targets the role of Y
    length = len(inputs)
    if shuffle:
        index = torch.randperm(length)  # random permutation of the sample indices
    else:
        index = torch.arange(length)    # keep the original order
    start_idx = 0
    while start_idx < length:
        end_idx = min(length, start_idx + batch_size)
        excerpt = index[start_idx:end_idx]
        X = inputs[excerpt]
        Y = targets[excerpt]
        yield X, Y  # Variable is deprecated since PyTorch 0.4; plain tensors work directly
        start_idx += batch_size

total_loss = 0.0
n_samples = 0
# The loop variables are renamed so they don't shadow the full X/Y tensors
# passed into get_batches, and the average is computed after the loop ends
for batch_x, batch_y in data.get_batches(X, Y, batch_size, True):
    model.zero_grad()
    output = model(batch_x)
    loss = loss_func(output, batch_y)
    loss.backward()
    optim.step()
    total_loss += loss.item()
    n_samples += len(batch_x)
avg_loss = total_loss / n_samples
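As a quick standalone sanity check of the generator, the sketch below feeds it toy tensors; the shapes are illustrative assumptions, and since get_batches never touches self, None can be passed for it when calling it outside a class:

X_toy = torch.randn(100, 10)
Y_toy = torch.randn(100, 1)
for bx, by in get_batches(None, X_toy, Y_toy, batch_size=32, shuffle=False):
    print(bx.shape, by.shape)  # the last batch holds the 4 leftover samples (100 = 3*32 + 4)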
