Python Deep Learning: Implementing DeepDream with Keras

Implementing DeepDream:

# import scipy.misc
import imageio
import scipy.ndimage
import numpy as np
from keras.preprocessing import image
from keras.applications import inception_v3
from keras import backend as K
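
# Note (not in the original post): with a TensorFlow 2.x backend, K.gradients and
# K.function below only work in graph mode. If needed, disable eager execution
# before building the model (unnecessary on TensorFlow 1.x):
# import tensorflow as tf
# tf.compat.v1.disable_eager_execution()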

# Disable all training-related operations
K.set_learning_phase(0)

# Build the Inception V3 network without its fully connected top, loading the model with pretrained ImageNet weights
model = inception_v3.InceptionV3(weights='imagenet', include_top=False)

# This dictionary maps layer names to a coefficient that quantifies how much the
# layer's activation contributes to the loss you will seek to maximize.
# Note that the layer names are hard-coded in the built-in Inception V3 application;
# you can list them all with model.summary() (see the optional snippet after this dictionary).
layer_contributions = {
    'mixed2': 0.2,
    'mixed3': 3.,
    'mixed4': 2.,
    'mixed5': 1.5,
}
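
# Optional (not in the original post): print the names of the mixed blocks that
# can be used as keys in layer_contributions above.
# for layer in model.layers:
#     if layer.name.startswith('mixed'):
#         print(layer.name)
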
# Create a dictionary mapping layer names to layer instances
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss by adding each layer's contribution to this scalar variable
loss = K.variable(0.)
for layer_name in layer_contributions:
    coeff = layer_contributions[layer_name]
    activation = layer_dict[layer_name].output  # Retrieve the layer's output
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    # Add the L2 norm of the layer's features to the loss. To avoid border
    # artifacts, only non-border pixels are included in the loss.
    loss += coeff * K.sum(K.square(activation[:, 2: -2, 2: -2, :])) / scaling

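# The "dream" is the model's input image. Compute the gradient of the loss with
# respect to it, normalize the gradient (dividing by its mean absolute value, an
# important trick for smooth ascent), and set up a Keras function that returns
# the loss value and the gradients for a given input image.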
dream = model.input
grads = K.gradients(loss, dream)[0]
grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)


def eval_loss_and_grads(x):
    outs = fetch_loss_and_grads([x])
    loss_value = outs[0]
    grad_values = outs[1]
    return loss_value, grad_values


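# Run gradient ascent for a number of iterations, stopping early if the loss
# exceeds max_loss (to avoid over-saturated artifacts).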
def gradient_ascent(x, iterations, step, max_loss=None):
    for i in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        if max_loss is not None and loss_value > max_loss:
            break
        print('...Loss value at', i, ':', loss_value)
        x += step * grad_values

    return x


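# Resize a batch-of-one image array to the target (height, width) using
# scipy.ndimage.zoom with bilinear interpolation (order=1).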
def resize_img(img, size):
    img = np.copy(img)
    factors = (1,
               float(size[0]) / img.shape[1],
               float(size[1]) / img.shape[2],
               1)
    return scipy.ndimage.zoom(img, factors, order=1)


def save_img(img, fname):
    pil_img = deprocess_image(np.copy(img))
    # scipy.misc.imsave(fname, pil_img)
    # scipy.misc.imsave raises an error on newer SciPy versions, so use imageio instead
    imageio.imsave(fname, pil_img)


def preprocess_image(image_path):
    # Utility function to open an image, resize it, and format it into a tensor
    # that the Inception V3 model can process
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img


def deprocess_image(x):
    # Utility function to convert a tensor into a valid image
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, x.shape[2], x.shape[3]))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((x.shape[1], x.shape[2], 3))
    x /= 2.
    x += 0.5
    x *= 255.
    x = np.clip(x, 0, 255).astype('uint8')
    return x
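

# Sanity-check sketch (not in the original post): deprocess_image inverts
# inception_v3.preprocess_input, which maps pixel values from [0, 255] to [-1, 1].
# dummy = np.random.randint(0, 256, (1, 64, 64, 3)).astype('float32')
# recovered = deprocess_image(inception_v3.preprocess_input(np.copy(dummy)))
# print(np.abs(recovered.astype('float32') - dummy[0]).max())  # ~0-1 (uint8 rounding)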


# Playing with these hyperparameters will also allow you to achieve new effects

step = 0.01  # Gradient ascent step size
num_octave = 3  # Number of scales (octaves) at which to run gradient ascent
octave_scale = 1.4  # Size ratio between successive scales
iterations = 20  # Number of ascent steps per scale

# If the loss grows larger than 10, interrupt the gradient ascent process to avoid ugly artifacts
max_loss = 10.

# Path to the input image
base_image_path = '/mnt/projects/deeplearn/codes/test8_1.jpg'

# Load the image into a Numpy array
img = preprocess_image(base_image_path)

# Prepare a list of shape tuples defining the different scales at which to run gradient ascent
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
    shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
    successive_shapes.append(shape)

# Reverse list of shapes, so that they are in increasing order
successive_shapes = successive_shapes[::-1]
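# For the image used in this post (about 333x499 pixels after loading), this gives
# successive_shapes == [(169, 254), (237, 356), (333, 499)], matching the
# dream_at_scale_* filenames in the results below.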

# Resize the Numpy array of the image to our smallest scale
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])

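# Iterate over the scales (octaves) from smallest to largest: at each scale,
# upscale the dream image, run gradient ascent, then re-inject the detail that
# was lost while upscaling.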
for shape in successive_shapes:
    print('Processing image shape', shape)
    img = resize_img(img, shape)
    img = gradient_ascent(img,
                          iterations=iterations,
                          step=step,
                          max_loss=max_loss)
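    # Re-inject detail lost by upscaling: the difference between the original
    # image resized to this scale and the shrunken image carried over from the
    # previous (smaller) scale.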
    upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
    same_size_original = resize_img(original_img, shape)
    lost_detail = same_size_original - upscaled_shrunk_original_img

    img += lost_detail
    shrunk_original_img = resize_img(original_img, shape)
    save_img(img, fname='dream_at_scale_' + str(shape) + '.png')

save_img(img, fname='final_dream.png')

Original image: [image]

Result images:
dream_at_scale_(169, 254).png [image]
dream_at_scale_(237, 356).png [image]
dream_at_scale_(333, 499).png [image]

Final result (final_dream.png): [image]
