import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import inception_v3
# Fetch the base image we will "dream" over, and pick a prefix for the
# files the result will be saved under.
base_image_path = keras.utils.get_file("sky.jpg", "https://i.imgur.com/aGBdQyK.jpg")
result_prefix = "sky_dream"

# Names of the layers whose activation we try to maximize, mapped to their
# weight in the final loss we maximize. Tweaking these settings yields
# different visual effects.
layer_settings = {
    "mixed4": 1.0,
    "mixed5": 1.5,
    "mixed6": 2.0,
    "mixed7": 2.5,
}

# Gradient-ascent hyperparameters; playing with these will also produce
# new effects.
step = 0.01          # Gradient ascent step size
num_octave = 3       # Number of scales at which to run gradient ascent
octave_scale = 1.4   # Size ratio between successive scales
iterations = 20      # Number of ascent steps per scale
max_loss = 15.0      # Loss ceiling at which ascent stops for a scale
Downloading data from https://i.imgur.com/aGBdQyK.jpg
131072/127372 [==============================] - 0s 3us/step
# Notebook helper: render the downloaded base image inline so we can see
# what we are about to "dream" over.
from IPython.display import Image, display
display(Image(base_image_path))
def preprocess_image(image_path):
    """Open a picture and format it into an array InceptionV3 can consume.

    Parameters
    ----------
    image_path : str
        Path to an image file on disk.

    Returns
    -------
    np.ndarray
        Float array of shape (1, height, width, 3): the image with a
        leading batch dimension, pixel values scaled by
        ``inception_v3.preprocess_input`` (roughly to [-1, 1]).
    """
    img = keras.preprocessing.image.load_img(image_path)
    img = keras.preprocessing.image.img_to_array(img)
    # Add a leading batch dimension: (h, w, 3) -> (1, h, w, 3).
    img = np.expand_dims(img, axis=0)
    # Scale pixel values to the range InceptionV3 was trained with.
    img = inception_v3.preprocess_input(img)
    return img
def deprocess_image(x):
    """Convert an InceptionV3-preprocessed array back into a displayable image.

    Parameters
    ----------
    x : np.ndarray
        Batched array of shape (1, height, width, 3) with values roughly
        in [-1, 1] (the output range of ``inception_v3.preprocess_input``).

    Returns
    -------
    np.ndarray
        uint8 array of shape (height, width, 3) clipped to [0, 255].
        The input array is left unmodified.
    """
    # Work on a float copy: the reshape below shares memory with the input,
    # so the in-place ops would otherwise clobber the caller's array.
    x = np.array(x, dtype="float32").reshape((x.shape[1], x.shape[2], 3))
    # Undo InceptionV3 preprocessing: [-1, 1] -> [0, 1] -> [0, 255].
    x /= 2.0
    x += 0.5
    x *= 255.0
    # Clip to the valid range and convert to uint8.
    return np.clip(x, 0, 255).astype("uint8")
# Build an InceptionV3 model loaded with pre-trained ImageNet weights;
# include_top=False drops the classification head, keeping only the
# convolutional feature layers we need.
model = inception_v3.InceptionV3(weights="imagenet", include_top=False)

# Symbolic output of each "key" layer (layer names are unique), keyed by
# name. A dict comprehension replaces the original dict([...]) construction.
outputs_dict = {
    layer_name: model.get_layer(layer_name).output
    for layer_name in layer_settings
}

# Model mapping an input image to the activation values of every target
# layer, returned as a dict.
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
def compute_loss(input_image):
    """Weighted sum of the (scaled) squared activations of the target layers.

    Parameters
    ----------
    input_image : tf.Tensor
        Batched image tensor fed through ``feature_extractor``.

    Returns
    -------
    tf.Tensor
        Scalar loss: for each layer in ``layer_settings``, the sum of
        squared non-border activations, normalized by the layer's total
        number of entries and weighted by the layer's coefficient.
    """
    features = feature_extractor(input_image)
    loss = tf.zeros(shape=())
    for name, activation in features.items():
        coeff = layer_settings[name]
        # Normalize by the number of activation entries so layers of
        # different sizes contribute on a comparable scale.
        scaling = tf.reduce_prod(tf.cast(tf.shape(activation), "float32"))
        # Avoid border artifacts by only involving non-border pixels.
        loss += coeff * tf.reduce_sum(tf.square(activation[:, 2:-2, 2:-2, :])) / scaling
    return loss
@tf.function
def gradient_ascent_step(img, learning_rate):
    """Perform one gradient-ascent step on the image.

    Nudges ``img`` in the direction that increases ``compute_loss(img)``.

    Parameters
    ----------
    img : tf.Tensor
        Current image tensor.
    learning_rate : float
        Step size for the update.

    Returns
    -------
    (tf.Tensor, tf.Tensor)
        The loss value before the update, and the updated image.
    """
    with tf.GradientTape() as tape:
        # img is a plain tensor (not a tf.Variable), so it must be watched
        # explicitly for gradients to flow.
        tape.watch(img)
        loss = compute_loss(img)
    # Compute and normalize gradients; the 1e-6 floor guards against
    # division by zero when gradients are (near) uniformly zero.
    grads = tape.gradient(loss, img)
    grads /= tf.maximum(tf.reduce_mean(tf.abs(grads)), 1e-6)
    img += learning_rate * grads
    return loss, img
def gradient_ascent_loop(img, iterations, learning_rate, max_loss=None):
    """Run repeated gradient-ascent steps on the image at one scale.

    Parameters
    ----------
    img : tf.Tensor
        Image tensor to optimize.
    iterations : int
        Maximum number of ascent steps to take.
    learning_rate : float
        Step size passed to ``gradient_ascent_step``.
    max_loss : float or None
        If given, stop early as soon as the loss exceeds this value.

    Returns
    -------
    tf.Tensor
        The updated image after the last completed step.
    """
    for i in range(iterations):
        loss, img = gradient_ascent_step(img, learning_rate)
        if max_loss is not None and loss > max_loss:
            break
        print("... Loss value at step %d: %.2f" % (i, loss))
    return img
Processing octave 0 with shape (326, 489)
... Loss value at step 0: 0.45
... Loss value at step 1: 0.63
... Loss value at step 2: 0.91
... Loss value at step 3: 1.24
... Loss value at step 4: 1.58
... Loss value at step 5: 1.90
... Loss value at step 6: 2.22
... Loss value at step 7: 2.51
... Loss value at step 8: 2.82
... Loss value at step 9: 3.12
... Loss value at step 10: 3.39
... Loss value at step 11: 3.69
... Loss value at step 12: 3.93
... Loss value at step 13: 4.19
... Loss value at step 14: 4.44
... Loss value at step 15: 4.71
... Loss value at step 16: 4.93
... Loss value at step 17: 5.21
... Loss value at step 18: 5.42
... Loss value at step 19: 5.66
Processing octave 1 with shape (457, 685)
... Loss value at step 0: 1.08
... Loss value at step 1: 1.76
... Loss value at step 2: 2.32
... Loss value at step 3: 2.80
... Loss value at step 4: 3.23
... Loss value at step 5: 3.62
... Loss value at step 6: 4.03
... Loss value at step 7: 4.43
... Loss value at step 8: 4.77
... Loss value at step 9: 5.11
... Loss value at step 10: 5.45
... Loss value at step 11: 5.78
... Loss value at step 12: 6.10
... Loss value at step 13: 6.42
... Loss value at step 14: 6.73
... Loss value at step 15: 7.03
... Loss value at step 16: 7.30
... Loss value at step 17: 7.61
... Loss value at step 18: 7.85
... Loss value at step 19: 8.15
Processing octave 2 with shape (640, 960)
... Loss value at step 0: 1.28
... Loss value at step 1: 2.03
... Loss value at step 2: 2.66
... Loss value at step 3: 3.19
... Loss value at step 4: 3.67
... Loss value at step 5: 4.15
... Loss value at step 6: 4.59
... Loss value at step 7: 4.99
... Loss value at step 8: 5.39
... Loss value at step 9: 5.75
... Loss value at step 10: 6.16
... Loss value at step 11: 6.48
... Loss value at step 12: 6.81
... Loss value at step 13: 7.11
... Loss value at step 14: 7.49
... Loss value at step 15: 7.76
... Loss value at step 16: 8.07
... Loss value at step 17: 8.40
... Loss value at step 18: 8.64
... Loss value at step 19: 8.93