Lyrics parser: printing lyrics on schedule with basic Python statements

musicLrc = '''[00:03.50]传奇
[00:19.10]作词:刘兵 作曲:李健
[00:20.60]演唱:王菲
[00:26.60]    
[04:40.75][02:39.90][00:36.25]只是因为在人群中多看了你一眼
[04:49.00]
[02:47.44][00:43.69]再也没能忘掉你容颜
[02:54.83][00:51.24]梦想着偶然能有一天再相见
[03:02.32][00:58.75]从此我开始孤单思念
[03:08.15][01:04.30]
[03:09.35][01:05.50]想你时你在天边
[03:16.90][01:13.13]想你时你在眼前
[03:24.42][01:20.92]想你时你在脑海
[03:31.85][01:28.44]想你时你在心田
[03:38.67][01:35.05]
[04:09.96][03:39.87][01:36.25]宁愿相信我们前世有约
[04:16.37][03:46.38][01:42.47]今生的爱情故事 不会再改变
[04:24.82][03:54.83][01:51.18]宁愿用这一生等你发现
[04:31.38][04:01.40][01:57.43]我一直在你身旁 从未走远
[04:39.55][04:09.00][02:07.85]'''
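# The LRC text above is the data being parsed: every line carries one or more
# "[mm:ss.xx]" time tags followed by the lyric text, and a line with several
# tags means the same lyric is sung at each of those times (a repeated chorus).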
import time
# Create a dictionary to hold time -> lyric pairs
musicdict = {}
# Split musicLrc into individual lines
list1 = musicLrc.splitlines()
# Walk through each line of the LRC text
for each in list1:
    # Split the line on "]"; the last piece is the lyric text, saved to word
    word = each.split("]")[-1]
    # Everything before the last "]" is the list of time tags, e.g. ['[03:38.67', '[01:35.05'], saved to time1
    time1 = each.split("]")[:-1]
    for t in time1:
        # Drop the leading "[" from each tag and split it into minutes and seconds
        time2 = t[1:].split(":")
        # Convert the tag to a number of seconds so the keys can be sorted later
        timesum = float(time2[0])*60 + float(time2[1])
        # Store the lyric under its timestamp in the dictionary
        musicdict[timesum] = word
# print(musicdict)
# Turn the dictionary keys into a list so they can be ordered with sort()
list2 = list(musicdict.keys())
# Sort the timestamps from smallest to largest
list2.sort()
# Print the lyrics one by one in time order
for i in range(len(list2)):
    if i == 0:
        # Real-time display is too slow, so everything runs at 10x speed
        # The first pause is simply the first timestamp itself
        time.sleep(list2[0]/10)
    else:
        # From the second line on, pause for this timestamp minus the previous one
        time.sleep((list2[i]-list2[i-1])/10)
    # Print the lyric for this timestamp
    print(musicdict[list2[i]])
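
To make the timestamp arithmetic concrete, here is a minimal standalone sketch of the same conversion; the helper name `tag_to_seconds` is not part of the script above and is used only for illustration.

```python
def tag_to_seconds(tag):
    # "[02:39.90]" -> minutes "02", seconds "39.90"
    minutes, seconds = tag.strip("[]").split(":")
    return float(minutes) * 60 + float(seconds)

print(f"{tag_to_seconds('[02:39.90]'):.2f}")  # 159.90
print(f"{tag_to_seconds('[00:36.25]'):.2f}")  # 36.25
```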

Summary: many Python statements can be layered on top of each other. Each one is easy enough to use on its own, but combining them can be confusing at first, so exercises like this deserve plenty of practice.
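
As an illustration of how these basic statements combine, the sketch below rewrites the same parser more compactly with the standard-library `re` module and `sorted()`; it assumes the `musicLrc` string defined above and is only an alternative sketch, not the original approach.

```python
import re
import time

# Build a {seconds: lyric} mapping by matching every "[mm:ss.xx]" tag on a line
timed = {}
for line in musicLrc.splitlines():
    lyric = re.sub(r"\[\d+:\d+(?:\.\d+)?\]", "", line)
    for minutes, seconds in re.findall(r"\[(\d+):(\d+(?:\.\d+)?)\]", line):
        timed[int(minutes) * 60 + float(seconds)] = lyric

# Walk the timestamps in order, sleeping for the gap since the previous one (10x speed)
previous = 0.0
for stamp in sorted(timed):
    time.sleep((stamp - previous) / 10)
    previous = stamp
    print(timed[stamp])
```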
