Style Transfer
- Transfer the style of a style image onto a content image to obtain a synthesized image.
CNN-Based Style Transfer
- The input is a content image and a style image; the goal is to apply the style image's style to the content image.
- A CNN produces a set of features at every layer for its input. We train a synthesized image so that, when fed through the same CNN, its output at a chosen layer matches the content image's features at that layer, while its outputs at the chosen style layers match the style image's style statistics (the objective is sketched right below).
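Concretely, writing $x$ for the synthesized image, $x_c$ for the content image, and $x_s$ for the style image, the optimization (matching the weighted sum implemented in `compute_loss` below, with weights `content_weight`, `style_weight`, `tv_weight`) is

$$
\min_x\; \alpha\,\mathcal{L}_{\text{content}}(x, x_c) + \beta\,\mathcal{L}_{\text{style}}(x, x_s) + \gamma\,\mathcal{L}_{\text{TV}}(x),
$$

where the content term compares deep features at the content layers, the style term compares Gram matrices at the style layers, and the total variation term suppresses pixel-level noise.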
Code Implementation
# Read the content and style images
%matplotlib inline
import torch
import torchvision
from torch import nn
from d2l import torch as d2l
d2l.set_figsize()
content_img = d2l.Image.open('/Users/tiger/Desktop/study/机器学习/李沐深度学习/d2l-zh/pytorch/img/rainier.jpg')
d2l.plt.imshow(content_img);
(figure: the content image, output_2_0.svg)
style_img = d2l.Image.open('/Users/tiger/Desktop/study/机器学习/李沐深度学习/d2l-zh/pytorch/img/autumn-oak.jpg')
d2l.plt.imshow(style_img);
(figure: the style image, output_3_0.svg)
# Preprocessing and postprocessing
rgb_mean = torch.tensor([0.485, 0.456, 0.406])
rgb_std = torch.tensor([0.229, 0.224, 0.225])
# Convert a PIL image into a normalized tensor with a batch dimension
def preprocess(img, image_shape):
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.Resize(image_shape),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=rgb_mean, std=rgb_std)])
    return transforms(img).unsqueeze(0)
# Convert a tensor back into a PIL image, undoing the normalization
def postprocess(img):
    img = img[0].to(rgb_std.device)
    img = torch.clamp(img.permute(1, 2, 0) * rgb_std + rgb_mean, 0, 1)
    return torchvision.transforms.ToPILImage()(img.permute(2, 0, 1))
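As a quick sanity check (a sketch, not part of the original notebook; `X_test` is a hypothetical name), `preprocess` followed by `postprocess` should approximately round-trip an image, up to resizing and the clamp to [0, 1]:

```python
# Sketch: round-trip the content image through preprocess/postprocess
X_test = preprocess(content_img, (300, 450))  # shape (1, 3, 300, 450)
d2l.plt.imshow(postprocess(X_test));          # looks like the resized original
```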
# Extract image features
pretrained_net = torchvision.models.vgg19(pretrained=True)
style_layers, content_layers = [0, 5, 10, 19, 28], [25]  # layers near the input preserve fine detail, deeper layers capture more global content, so the style layers span the whole network while the single content layer sits deep
net = nn.Sequential(*[
    pretrained_net.features[i]
    for i in range(max(content_layers + style_layers) + 1)])  # keep layers 0..28 and drop everything after layer 28
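To see what these indices select, one can print the chosen modules (a quick inspection sketch). In torchvision's VGG-19 `features`, indices 0, 5, 10, 19, 28 are the first convolution of each of the five blocks, and 25 is the fourth convolution of block 4:

```python
# Sketch: confirm which VGG-19 modules the chosen indices refer to
for i in sorted(style_layers + content_layers):
    print(i, net[i])
```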
# Given X and the content/style layer indices, run the network and collect features
def extract_features(X, content_layers, style_layers):
    contents = []
    styles = []
    for i in range(len(net)):
        X = net[i](X)
        if i in style_layers:
            styles.append(X)
        if i in content_layers:
            contents.append(X)
    return contents, styles  # the content features and the style features
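A small shape check (sketch; `X_probe` is a hypothetical dummy input): deeper style layers have more channels but a smaller spatial extent, which is why `gram` below normalizes by both:

```python
# Sketch: check the feature shapes for a dummy 300x450 input
X_probe = torch.rand((1, 3, 300, 450))
contents, styles = extract_features(X_probe, content_layers, style_layers)
print([tuple(s.shape) for s in styles])    # channels grow, spatial size shrinks per block
print([tuple(c.shape) for c in contents])
```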
# Get the content features (the targets are extracted once, before training starts)
def get_contents(image_shape, device):
    content_X = preprocess(content_img, image_shape).to(device)
    contents_Y, _ = extract_features(content_X, content_layers, style_layers)
    return content_X, contents_Y
# Get the style features
def get_styles(image_shape, device):
    style_X = preprocess(style_img, image_shape).to(device)
    _, styles_Y = extract_features(style_X, content_layers, style_layers)
    return style_X, styles_Y
# Define the loss functions
def content_loss(Y_hat, Y):
    return torch.square(Y_hat - Y.detach()).mean()  # content loss is just the mean squared error against the (detached) target features
def gram(X):
    num_channels, n = X.shape[1], X.numel() // X.shape[1]
    X = X.reshape((num_channels, n))  # one row per channel
    return torch.matmul(X, X.T) / (num_channels * n)  # normalized Gram matrix
def style_loss(Y_hat, gram_Y):
    return torch.square(gram(Y_hat) - gram_Y.detach()).mean()  # mean squared error between Gram matrices
def tv_loss(Y_hat):
    return 0.5 * (torch.abs(Y_hat[:, :, 1:, :] - Y_hat[:, :, :-1, :]).mean() +
                  torch.abs(Y_hat[:, :, :, 1:] - Y_hat[:, :, :, :-1]).mean())  # total variation, a standard denoising penalty
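In math form, for a batch of one, `gram` reshapes a feature map into $\mathbf{X} \in \mathbb{R}^{c \times hw}$ (one row per channel) and computes

$$
\mathbf{G} = \frac{\mathbf{X}\mathbf{X}^\top}{c \cdot hw} \in \mathbb{R}^{c \times c},
$$

so entry $G_{ij}$ measures the correlation between channels $i$ and $j$. The Gram matrix discards spatial layout entirely, which is what makes it a style (texture) statistic rather than a content one.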
# The style-transfer loss is a weighted sum of the content, style, and total variation losses
content_weight, style_weight, tv_weight = 1, 1e3, 10
def compute_loss(X, contents_Y_hat, styles_Y_hat, contents_Y, styles_Y_gram):
    contents_l = [
        content_loss(Y_hat, Y) * content_weight
        for Y_hat, Y in zip(contents_Y_hat, contents_Y)]
    styles_l = [
        style_loss(Y_hat, Y) * style_weight
        for Y_hat, Y in zip(styles_Y_hat, styles_Y_gram)]
    tv_l = tv_loss(X) * tv_weight
    # note: `10 * styles_l` is Python list repetition, so the style losses are summed 10 times
    l = sum(10 * styles_l + contents_l + [tv_l])
    return contents_l, styles_l, tv_l, l  # return each individual loss plus the total
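Since `styles_l` already carries `style_weight = 1e3` and the list repetition sums it ten times, the total actually optimized (writing each $\ell$ for the unweighted per-layer loss) is

$$
\ell \;=\; \sum_j \ell_{\text{content}}^{(j)} \;+\; 10^4 \sum_i \ell_{\text{style}}^{(i)} \;+\; 10\,\ell_{\text{TV}},
$$

i.e. the effective style weight is $10 \times 10^3 = 10^4$.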
# Initialize the synthesized image
class SynthesizedImage(nn.Module):  # given the image shape, the pixels themselves become the trainable weight
    def __init__(self, img_shape, **kwargs):
        super(SynthesizedImage, self).__init__(**kwargs)
        self.weight = nn.Parameter(torch.rand(*img_shape))

    def forward(self):
        return self.weight
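The whole "model" thus has exactly one parameter tensor, the image pixels themselves. A quick sketch (not from the original notebook) to verify:

```python
# Sketch: the only trainable parameters are the image pixels
img = SynthesizedImage((1, 3, 300, 450))
print(sum(p.numel() for p in img.parameters()))  # 1 * 3 * 300 * 450 = 405000
```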
# Create the synthesized image, the optimizer, and the precomputed style targets
def get_inits(X, device, lr, styles_Y):
    gen_img = SynthesizedImage(X.shape).to(device)
    gen_img.weight.data.copy_(X.data)  # initialize the synthesized image from the content image
    trainer = torch.optim.Adam(gen_img.parameters(), lr=lr)
    styles_Y_gram = [gram(Y) for Y in styles_Y]  # precompute the style Gram matrices once
    return gen_img(), styles_Y_gram, trainer
# Training
def train(X, contents_Y, styles_Y, device, lr, num_epochs, lr_decay_epoch):
    X, styles_Y_gram, trainer = get_inits(X, device, lr, styles_Y)
    scheduler = torch.optim.lr_scheduler.StepLR(trainer, lr_decay_epoch, 0.8)
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[10, num_epochs],
                            legend=['content', 'style', 'TV'],
                            ncols=2, figsize=(7, 2.5))
    for epoch in range(num_epochs):
        trainer.zero_grad()
        contents_Y_hat, styles_Y_hat = extract_features(
            X, content_layers, style_layers)
        contents_l, styles_l, tv_l, l = compute_loss(
            X, contents_Y_hat, styles_Y_hat, contents_Y, styles_Y_gram)
        l.backward()
        trainer.step()
        scheduler.step()
        if (epoch + 1) % 10 == 0:
            animator.axes[1].imshow(postprocess(X))
            animator.add(epoch + 1,
                         [float(sum(contents_l)),
                          float(sum(styles_l)),
                          float(tv_l)])
    return X
# Train the model
device, image_shape = d2l.try_gpu(), (300, 450)
net = net.to(device)
content_X, contents_Y = get_contents(image_shape, device)
_, styles_Y = get_styles(image_shape, device)
output = train(content_X, contents_Y, styles_Y, device, 0.3, 500, 50)
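After training, the result can be converted back to a regular image for viewing; a small usage sketch (`output` is the tensor returned by `train` above):

```python
# Sketch: display the final synthesized image
d2l.plt.imshow(postprocess(output));
```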