Hung-yi Lee (李宏毅) Homework 2: Code Walkthrough

I have collected the parts that will come in handy later for signal processing.
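The snippets below are excerpted straight from the notebook, so the imports and the small load_feat helper they rely on are not shown. Here is a minimal set that makes them self-contained (load_feat is my own sketch based on how the code uses it; the notebook's version may differ slightly):

import os
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm

def load_feat(path):
    # each utterance's features are saved as a tensor of shape (seq_len, 39)
    return torch.load(path)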

Concatenating neighboring feature frames: the shift and concat_feat functions

def shift(x, n):
    # shift the frame sequence so that row t becomes row t+n,
    # repeating the boundary frame to fill the gap
    if n < 0:
        # pad the front with copies of the first frame, drop the last -n frames
        left = x[0].repeat(-n, 1)
        right = x[:n]
    elif n > 0:
        # drop the first n frames, pad the end with copies of the last frame
        right = x[-1].repeat(n, 1)
        left = x[n:]
    else:
        return x

    return torch.cat((left, right), dim=0)

def concat_feat(x, concat_n):
    assert concat_n % 2 == 1 # n must be odd
    if concat_n < 2:
        return x
    seq_len, feature_dim = x.size(0), x.size(1)
    x = x.repeat(1, concat_n)
    x = x.view(seq_len, concat_n, feature_dim).permute(1, 0, 2) # concat_n, seq_len, feature_dim
    mid = (concat_n // 2)
    for r_idx in range(1, mid+1):
        x[mid + r_idx, :] = shift(x[mid + r_idx], r_idx)   # rows above mid hold future frames
        x[mid - r_idx, :] = shift(x[mid - r_idx], -r_idx)  # rows below mid hold past frames

    # flatten back so that row t is [frame t-mid, ..., frame t, ..., frame t+mid]
    return x.permute(1, 0, 2).view(seq_len, concat_n * feature_dim)
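A quick sanity check (my own, not from the homework): each frame ends up concatenated with its mid = concat_n // 2 neighbors on each side, and boundary frames are padded by repetition:

feat = torch.arange(5 * 39, dtype=torch.float32).view(5, 39)  # 5 frames, 39-dim features
out = concat_feat(feat, 3)
print(out.shape)  # torch.Size([5, 117]), i.e. (seq_len, concat_n * feature_dim)
# frame 0 has no left neighbor, so its left slot repeats frame 0 itself
print(torch.equal(out[0, :39], feat[0]))   # True (repeated boundary)
print(torch.equal(out[0, 78:], feat[1]))   # True (right neighbor)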

Preprocessing the data: preprocess_data

Preprocessing first reads the input features and the labels from the raw data (the test split has no labels).

Then all features are stored into one large matrix X, with the corresponding labels saved in y.

def preprocess_data(split, feat_dir, phone_path, concat_nframes, train_ratio=0.8, train_val_seed=1337):
    class_num = 41 # NOTE: pre-computed, should not need change
    mode = 'train' if (split == 'train' or split == 'val') else 'test'

    # build the label dictionary: utterance id -> list of per-frame phone labels
    label_dict = {}
    if mode != 'test':
        phone_file = open(os.path.join(phone_path, f'{mode}_labels.txt')).readlines()
        for line in phone_file:
            line = line.strip('\n').split(' ')
            label_dict[line[0]] = [int(p) for p in line[1:]]

    # Split the training data and the validation data according to train_val_seed and train_ratio (0.8).
    # For testing data, we only collect the paths.
    # The final list is usage_list.
    if split == 'train' or split == 'val':
        # split training and validation data
        usage_list = open(os.path.join(phone_path, 'train_split.txt')).readlines()
        random.seed(train_val_seed)
        random.shuffle(usage_list)
        percent = int(len(usage_list) * train_ratio)
        usage_list = usage_list[:percent] if split == 'train' else usage_list[percent:]
    elif split == 'test':
        usage_list = open(os.path.join(phone_path, 'test_split.txt')).readlines()
    else:
        raise ValueError('Invalid \'split\' argument for dataset: PhoneDataset!')

    usage_list = [line.strip('\n') for line in usage_list]
    # print useful information to help verify the training code
    print('[Dataset] - # phone classes: ' + str(class_num) + ', number of utterances for ' + split + ': ' + str(len(usage_list)))

    # max_len is an upper bound large enough to hold every frame of every utterance
    max_len = 3000000
    # X holds the input features; y holds the labels
    X = torch.empty(max_len, 39 * concat_nframes)
    if mode != 'test':
        y = torch.empty(max_len, dtype=torch.long)

    idx = 0
    for i, fname in tqdm(enumerate(usage_list)):
        feat = load_feat(os.path.join(feat_dir, mode, f'{fname}.pt'))
        # cur_len is the number of frames in this utterance; utterances differ in length,
        # but every frame has its own label
        cur_len = len(feat)
        feat = concat_feat(feat, concat_nframes)
        if mode != 'test':
            label = torch.LongTensor(label_dict[fname])

        X[idx: idx + cur_len, :] = feat
        if mode != 'test':
            y[idx: idx + cur_len] = label

        idx += cur_len

    # idx is the total number of frames written; trim the unused preallocated space
    # (for the test split there is no y to trim)
    X = X[:idx, :]
    if mode != 'test':
        y = y[:idx]

    print(f'[INFO] {split} set')
    print(X.shape)
    if mode != 'test':
        print(y.shape)
        return X, y
    else:
        return X

The LibriDataset class

This mostly exists so that the training/validation data and the test data share the same interface; it also keeps each feature paired with its label.

Each dataset instance then carries both data and label, and indexing it returns the feature (plus the label, when one exists).

class LibriDataset(Dataset):
    def __init__(self, X, y=None):
        self.data = X
        if y is not None:
            self.label = torch.LongTensor(y)
        else:
            self.label = None

    def __getitem__(self, idx):
        if self.label is not None:
            return self.data[idx], self.label[idx]
        else:
            return self.data[idx]

    def __len__(self):
        return len(self.data)
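A quick usage sketch (mine, with dummy tensors) showing both modes:

dummy_X = torch.randn(10, 819)            # 10 frames, 39 * 21 dims
dummy_y = torch.randint(0, 41, (10,))     # one phone label per frame
train_like = LibriDataset(dummy_X, dummy_y)
feat, lab = train_like[0]                 # yields a (feature, label) pair
test_like = LibriDataset(dummy_X)         # no labels: yields features only
feat_only = test_like[0]
print(len(train_like), feat.shape, lab, feat_only.shape)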

The same_seeds function

This just wraps up all the seed setting: it fixes the PyTorch, CUDA, and NumPy seeds and makes cuDNN deterministic, so runs are reproducible.

def same_seeds(seed):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

The BasicBlock and Classifier classes

These build the classifier network. A BasicBlock is just a simple BatchNorm + Linear + ReLU unit; the Classifier stacks an input block, hidden_layers further blocks, and a final linear layer (so hidden_layers + 1 blocks in total). This is the part people tweak the most, so there is little fixed structure to study here.

class BasicBlock(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(BasicBlock, self).__init__()
        self.block = nn.Sequential(
            nn.BatchNorm1d(input_dim, affine=True, momentum=0.1),
            nn.Linear(input_dim, output_dim),
            #nn.Dropout(p=0.2),
            nn.ReLU()
        )

    def forward(self, x):
        x = self.block(x)
        return x


class Classifier(nn.Module):
    def __init__(self, input_dim, output_dim=41, hidden_layers=1, hidden_dim=256):
        super(Classifier, self).__init__()

        self.fc = nn.Sequential(
            BasicBlock(input_dim, hidden_dim),
            *[BasicBlock(hidden_dim, hidden_dim) for _ in range(hidden_layers)],
            nn.Linear(hidden_dim, output_dim)
        )

    def forward(self, x):
        x = self.fc(x)
        return x
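A shape check I find handy (not in the original post): run a dummy batch through and confirm the output is (batch, 41) logits. Note that BatchNorm1d needs more than one sample in training mode, so use eval() or a batch larger than 1:

m = Classifier(input_dim=819, hidden_layers=2, hidden_dim=256)
m.eval()                       # eval mode: BatchNorm uses running statistics
dummy = torch.randn(8, 819)
print(m(dummy).shape)          # torch.Size([8, 41])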

Some hyperparameters; the inline comments explain each one.

# Hyper-parameters
# data parameters
concat_nframes = 21            # the number of frames to concat with, n must be odd (total 2k+1 = n frames)
train_ratio = 0.8               # the ratio of data used for training, the rest will be used for validation

# training parameters
seed = 1314                        # random seed
batch_size = 256                # batch size
num_epoch = 20              # the number of training epoch
learning_rate = 0.0001          # learning rate
model_path = './model.ckpt'     # the path where the checkpoint will be saved

# model parameters
input_dim = 39 * concat_nframes # the input dim of the model, you should not change the value
hidden_layers = 4               # the number of hidden layers
hidden_dim = 256                # the hidden dim

Prepare the data and the model. The tricky parts were covered above, and the comments fill in the rest.

#Prepare dataset and model
import gc

# preprocess data
train_X, train_y = preprocess_data(split='train', feat_dir='./libriphone/feat', phone_path='./libriphone', concat_nframes=concat_nframes, train_ratio=train_ratio)
val_X, val_y = preprocess_data(split='val', feat_dir='./libriphone/feat', phone_path='./libriphone', concat_nframes=concat_nframes, train_ratio=train_ratio)

# get dataset
train_set = LibriDataset(train_X, train_y)
val_set = LibriDataset(val_X, val_y)

# remove raw feature to save memory
del train_X, train_y, val_X, val_y
gc.collect() # collect garbage to free the memory

# get dataloader
# shuffle=True: reshuffle the training data every epoch so each batch differs
# the default collate_fn stacks individual samples into one batch tensor, e.g. 256 tensors of shape (819,) -> (256, 819)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)
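# sanity check (mine, not from the original code): pull out one batch to inspect shapes
features, labels = next(iter(train_loader))
print(features.shape)   # torch.Size([256, 819]) = (batch_size, 39 * concat_nframes)
print(labels.shape)     # torch.Size([256])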

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print(f'DEVICE: {device}')


# fix random seed
same_seeds(seed)

# create model, define a loss function, and optimizer
model = Classifier(input_dim=input_dim, hidden_layers=hidden_layers, hidden_dim=hidden_dim).to(device)
# CrossEntropyLoss applies log-softmax to the classifier's raw logits internally
criterion = nn.CrossEntropyLoss()
# optimizer: AdamW (Adam with decoupled weight decay)
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)

Training (uses the training and validation data)

During training, accuracy and loss are obviously the quantities we care about, so they are initialized right at the start. num_epoch is 20, so we run 20 rounds.

A note on train_loader and val_loader: they wrap the datasets in DataLoader, which takes care of batching and shuffling, i.e. everything the training loop needs.

You can treat them as the training and validation data themselves (labels included).

So this line

for i, batch in enumerate(tqdm(train_loader)):

actually pulls one batch of data out of the training set at a time; at the start, i is 0. Each batch contains features and labels, which are unpacked here.

train_acc += (train_pred.detach() == labels.detach()).sum().item()

This line counts how many predictions in the current batch are correct. At first it may get almost none right, but backpropagation gradually improves it.
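As a toy illustration (my own, with made-up numbers) of what that line computes:

outputs = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels  = torch.tensor([1, 1, 1])
_, pred = torch.max(outputs, 1)           # pred = tensor([1, 0, 1])
correct = (pred == labels).sum().item()   # 2 of the 3 predictions match
print(correct)                            # 2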

The rest of the training loop is fairly straightforward. One branch exists so that training still runs when no validation set is configured.

# Training
best_acc = 0.0
for epoch in range(num_epoch):
    train_acc = 0.0
    train_loss = 0.0
    val_acc = 0.0
    val_loss = 0.0

    # training
    model.train()  # set the model to training mode
    for i, batch in enumerate(tqdm(train_loader)):
        features, labels = batch
        features = features.to(device) # put the data to gpu
        labels = labels.to(device) # put the data to gpu

        optimizer.zero_grad() # reset gradients to zero before this batch's backward pass
        outputs = model(features) #use the model to get the outputs

        loss = criterion(outputs, labels) #calculate the loss using Cross entropy
        loss.backward()
        optimizer.step()

        # take the index of the class with the highest score as the prediction
        _, train_pred = torch.max(outputs, 1)
        # detach(): get a tensor detached from the computation graph (no gradient tracking)
        train_acc += (train_pred.detach() == labels.detach()).sum().item() # accumulate correct predictions across batches into train_acc
        train_loss += loss.item() # item(): get the Python number inside a one-element tensor
        #break # uncomment to stop after one batch when debugging

    # validation
    if len(val_set) > 0:
        model.eval()  # set the model to evaluation mode
        with torch.no_grad(): # disable gradient computation during evaluation
            for i, batch in enumerate(tqdm(val_loader)):
                features, labels = batch
                features = features.to(device)
                labels = labels.to(device)
                outputs = model(features)

                loss = criterion(outputs, labels)

                _, val_pred = torch.max(outputs, 1) # get the index of the class with the highest probability
                # move predictions and labels from gpu to cpu before comparing
                val_acc += (val_pred.cpu() == labels.cpu()).sum().item()
                val_loss += loss.item()

            print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f} | Val Acc: {:3.6f} loss: {:3.6f}'.format(
                epoch + 1, num_epoch, train_acc / len(train_set), train_loss / len(train_loader),
                val_acc / len(val_set), val_loss / len(val_loader)
            ))

            # if the model improves, save a checkpoint at this epoch
            if val_acc > best_acc:
                best_acc = val_acc
                torch.save(model.state_dict(), model_path)
                print('saving model with acc {:.3f}'.format(best_acc / len(val_set)))
    else: # this branch only runs when no validation set is configured
        print('[{:03d}/{:03d}] Train Acc: {:3.6f} Loss: {:3.6f}'.format(
            epoch + 1, num_epoch, train_acc / len(train_set), train_loss / len(train_loader)
        ))

# if not validating, save the last epoch
if len(val_set) == 0:
    torch.save(model.state_dict(), model_path)
    print('saving model at last epoch')

Testing

Very similar to validation, except here we only need to concatenate the predictions for output.

#Testing
#Create a testing dataset, and load model from the saved checkpoint.
# load data
test_X = preprocess_data(split='test', feat_dir='./libriphone/feat', phone_path='./libriphone', concat_nframes=concat_nframes)
test_set = LibriDataset(test_X, None)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

#Make prediction.
pred = np.array([], dtype=np.int32)

model.eval()
with torch.no_grad():
    for i, batch in enumerate(tqdm(test_loader)):
        features = batch
        features = features.to(device)

        outputs = model(features)

        _, test_pred = torch.max(outputs, 1) # get the index of the class with the highest probability
        pred = np.concatenate((pred, test_pred.cpu().numpy()), axis=0)

Writing the results to a CSV file

#Write prediction to a CSV file.

#After this block finishes, download prediction.csv from the files section on the left-hand side and submit it to Kaggle.
with open('prediction.csv', 'w') as f:
    f.write('Id,Class\n')
    for i, y in enumerate(pred):
        f.write('{},{}\n'.format(i, y))
