TensorFlow getting-started test code

TensorFlow basics

First steps

import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers
import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Embedding
(x, y), (x_test, y_test) = datasets.mnist.load_data()
print('x', x, type(x))
print('y', y, type(y))
# print(x[0])
print('datasets:', x.shape, y.shape)
db = tf.data.Dataset.from_tensor_slices((x, y))
# for step, (X, Y) in enumerate(db):
#     print(step, X.shape, Y, Y.shape)
#
# for step, (X, Y) in enumerate(db.batch(64)):
#     print(step, X.shape, Y, Y.shape)
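# The loops above are commented out because they print every element/batch.
# A minimal sketch that limits output with take(), using the db defined above:
for step, (X, Y) in enumerate(db.batch(64).take(2)):
    print(step, X.shape, Y.shape)  # only the first 2 batches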


with tf.device('cpu'):
    const = tf.constant([[1.11, 2.22], [3.33, 4.44]], name='Var')
print(const.device)
print(const.numpy())
print(const.ndim)
print(tf.rank(const))


a = np.arange(5)
print(a.dtype)
aa = tf.convert_to_tensor(a)
print(aa)
aa = tf.convert_to_tensor(a, dtype=tf.float32)
print(aa)
aaa = tf.cast(aa, dtype=tf.double, name='change')
print(aaa)

b = tf.constant([0, 1])
bb = tf.cast(b, dtype=tf.bool)
bbb = tf.cast(bb, dtype=tf.int32)
print(bb)
print(bbb)

a = tf.range(5)
b = tf.Variable(a)
print(b.dtype)
print(b.name)
print(b.trainable)
b = tf.Variable(a, name='input_data')
print(b.name)
print(b.trainable)

print(isinstance(b, tf.Tensor))
print(isinstance(b, tf.Variable))
print(tf.is_tensor(b))
print(b.numpy())

# from numpy, list
print(tf.convert_to_tensor(np.ones([2, 3])))
print(tf.convert_to_tensor(np.zeros([2, 3])))
print(tf.convert_to_tensor([1, 2]))
print(tf.convert_to_tensor([1, 2.]))
print(tf.convert_to_tensor([[1], [2.]]))
print(tf.convert_to_tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))

# zeros
print(tf.zeros([]))
print(tf.zeros([1]))
print(tf.zeros([1, 2]))
print(tf.zeros([2, 3]))
print(tf.zeros([2, 3, 4]))

# zeros_like
a = tf.zeros([2, 3, 3])
print(a.shape)
print(tf.zeros_like(a))
print(tf.zeros(a.shape))

# ones
a = tf.ones(1)
print(a)
b = tf.ones([])
print(b)
print(tf.ones([2]))
print(tf.ones([2, 3]))
print(tf.ones_like(a))

# fill
print(tf.fill([2, 2], 0))
print(tf.fill([2, 2], 0.))
print(tf.fill([2, 2], 9))

# normal
print(tf.random.normal([2, 2], mean=1, stddev=1))
print(tf.random.normal([2, 2]))
print(tf.random.truncated_normal([2, 2], mean=0, stddev=1))

# uniform
print(tf.random.uniform([2, 2], minval=0, maxval=1))
print(tf.random.uniform([2, 2], minval=0, maxval=100))

# random permutation
idx = tf.range(10)
idx = tf.random.shuffle(idx)
a = tf.random.normal([10, 784])
b = tf.random.uniform([10], maxval=10, dtype=tf.int32)
print(a)
print(b)
a = tf.gather(a, idx)
b = tf.gather(b, idx)
print(a)
print(b)

# constant
print(tf.constant(1))
print(tf.constant([1]))
print(tf.constant([1, 2.]))
print(tf.constant([[1, 2.], [3., 4]]))

# loss
out = tf.random.uniform([4, 10])
print(out)
y = tf.range(4)
print(y)
y = tf.one_hot(y, depth=10)
print(y)
loss = tf.keras.losses.mse(y, out)
print(loss)
loss = tf.reduce_mean(loss)
print(loss)

# vector
net = layers.Dense(10)
print(net)
net.build((4, 8))
# print(net)
print(net.kernel)
print(net.bias)

# matrix
x = tf.random.normal([4, 784])
print(x)
net = layers.Dense(10)
net.build((4, 784))
print(net(x).shape)
print(net.kernel.shape)
print(net.bias.shape)


(X_train, y_train), (X_test, y_test) = keras.datasets.imdb.load_data(num_words=10000)
x_train = keras.preprocessing.sequence.pad_sequences(X_train, maxlen=80)
print(x_train.shape)
# emb = embedding(x_train)
# print(emb.shape)
# out = rnn(emb[:4])
# print(out.shape)
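# The commented lines above assume `embedding` and `rnn` layers that are never
# defined. A minimal sketch of definitions that would make them runnable
# (layer sizes here are assumptions, not from the original):
embedding = layers.Embedding(10000, 100)  # vocab of 10000, 100-dim embeddings (assumed)
rnn = layers.SimpleRNN(64)                # assumed recurrent layer width
emb = embedding(x_train[:4])              # a small slice keeps memory modest
print(emb.shape)   # (4, 80, 100)
out = rnn(emb)
print(out.shape)   # (4, 64)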



x = tf.random.normal((4, 32, 32, 3))
net = layers.Conv2D(16, kernel_size=3)
print(net(x))


a = tf.ones([1, 5, 5, 3])
print(a[0][0])
print(a[0][0][0])
print(a[0][0][2])

# indexing and slicing
a = tf.random.normal([4, 28, 28, 3])
print(a[1].shape)
print(a[1, 2].shape)
print(a[1, 2, 3].shape)
print(a[1, 2, 3, 2])

a = tf.range(10)
print(a[-1:])
print(a[-2:])
print(a[:2])
print(a[:-1])

a = tf.random.normal([4, 28, 28, 3])
print(a[0].shape)
print(a[0, :, :, :].shape)
print(a[:, :, :, 0].shape)
print(a[:, 0, :, :].shape)
print(a[0:4:2, :, :, :].shape)
print(a[:, ::4, ::7, :].shape)

a = tf.range(4)
print(a[::-1])
print(a[2::-1])


a = tf.random.normal([2, 4, 28, 28, 3])
print(a.shape)
print(a[0, :, :, :, :].shape)
print(a[0, ...].shape)
print(a[..., 0].shape)
print(a[0, ..., 2].shape)
print(a[1, 0, ..., 0].shape)
print(a[1, 0, ..., 0])

# gather
a = tf.random.normal([4, 35, 8])
print(tf.gather(a, axis=0, indices=[2, 3]).shape)
print(a[2:4].shape)
print(tf.gather(a, axis=0, indices=[2, 1, 3, 0]).shape)
print(tf.gather(a, axis=1, indices=[2, 3, 7, 9, 16]).shape)
print(tf.gather(a, axis=2, indices=[5, 2, 4]).shape)
# gather_nd
print(tf.gather_nd(a, [0, 1]).shape)
print(tf.gather_nd(a, [0, 1, 2]))
print(tf.gather_nd(a, [0, 1, 2]).shape)
print(tf.gather_nd(a, [[0, 1, 2]]))
print(tf.gather_nd(a, [[0, 1, 2]]).shape)
print(tf.gather_nd(a, [[0, 0], [1, 1]]).shape)
print(tf.gather_nd(a, [[0, 0], [1, 1], [2, 2]]).shape)
print(tf.gather_nd(a, [[0, 0, 0], [1, 1, 1], [2, 2, 2]]).shape)
print(tf.gather_nd(a, [[[0, 0, 0], [1, 1, 1], [2, 2, 2]]]).shape)

# boolean_mask
a = tf.random.normal([4, 28, 28, 3])
print(tf.boolean_mask(a, mask=[True, True, False, False]).shape)
print(tf.boolean_mask(a, mask=[True, False, True], axis=3).shape)
a = tf.ones([2, 3, 4])
print(tf.boolean_mask(a, mask=[[True, False, False], [False, True, True]]))


# reshape
a = tf.random.normal([4, 28, 28, 3])
print(a.shape, a.ndim)
print(tf.reshape(a, [4, 784, 3]).shape)
print(tf.reshape(a, [4, -1, 3]).shape)
print(tf.reshape(a, [4, -1]).shape)
print(tf.reshape(tf.reshape(a, [4, -1]), [4, 14, 56, 3]).shape)


# transpose
a = tf.random.normal((4, 3, 2, 1))
print(a.shape)
print(tf.transpose(a).shape)
print(tf.transpose(a, perm=[0, 2, 1, 3]).shape)


# expand_dims
a = tf.random.normal([4, 35, 8])
print(tf.expand_dims(a, axis=0).shape)
print(tf.expand_dims(a, axis=3).shape)
print(tf.expand_dims(a, axis=-1).shape)
print(tf.expand_dims(a, axis=-4).shape)

# squeeze_dim
print(tf.squeeze(tf.zeros([1, 2, 1, 1, 3])).shape)
a = tf.zeros([1, 2, 1, 3])
print(tf.squeeze(a, axis=0).shape)
print(tf.squeeze(a, axis=2).shape)
print(tf.squeeze(a, axis=-4).shape)


# broadcast_to
b = tf.broadcast_to(tf.random.normal([4, 1, 1, 1]), [4, 32, 32, 3])
print(b.shape)
# tile
a = tf.ones([3, 4])
a1 = tf.broadcast_to(a, [2, 3, 4])
a2 = tf.expand_dims(a, axis=0)
a2 = tf.tile(a2, [2, 1, 1])
print(a1.shape)
print(a2.shape)
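# broadcast_to and expand_dims + tile produce the same values here; quick check:
print(tf.reduce_all(tf.equal(a1, a2)))  # tf.Tensor(True, ...)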


# math
b = tf.ones([2, 2])
a = tf.fill([2, 2], 2.)
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(b // a)
print(b % a)
print(tf.math.log(b))
print(tf.exp(b))
# log base 2 via change of base: log2(x) = ln(x) / ln(2)
print(tf.math.log(b)/tf.math.log(2.))

# pow,sqrt
print(tf.pow(a, 3))
print(a**3)
print(tf.sqrt(a))

# matmul
print(b @ a)
print(tf.matmul(b, a))
a = tf.ones([4, 2, 3])
b = tf.fill([4, 3, 5], 2.)
print(a @ b)
print(tf.matmul(a, b))

x = tf.ones([4, 2])
W = tf.ones([2, 1])
b = tf.constant([0.1])
print(x @ W + b)

Tensor forward propagation

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

(x, y), _ = datasets.mnist.load_data()
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
# print(x.shape, y.shape, x.dtype, y.dtype)
# print(tf.reduce_min(x), tf.reduce_max(x), tf.reduce_min(y), tf.reduce_max(y))

train_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)
train_iter = iter(train_db)
sample = next(train_iter)
print('batch:', sample[0].shape, sample[1].shape)


# [b, 784] -> [b, 256] -> [b, 128] -> [b, 10]
# [dim_in, dim_out]
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
lr = 1e-3


for epoch in range(10):
    for step, (x, y) in enumerate(train_db):
        # x:[128, 28, 28]
        # y:[128]
        # [b, 28, 28] => [b, 28*28]
        x = tf.reshape(x, [-1, 28*28])
        with tf.GradientTape() as tape:  # tf.Variable objects are tracked automatically
            # x:[b, 28*28]
            # h1 = x@w1+b1
            # [b, 784]@[784, 256] + [256] => [b, 256] + [256] => [b, 256] + [b, 256]
            h1 = x@w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            h2 = h1@w2 + b2
            h2 = tf.nn.relu(h2)
            out = h2@w3 + b3
            # compute loss
            # out: [b, 10]
            # y: [b] => [b, 10]
            y_onehot = tf.one_hot(y, depth=10)
            # print(y_onehot)
            # mse = mean((y - out)^2)
            # [b, 10]
            loss = tf.square(y_onehot - out)
            # print(loss)
            # mean: scalar
            loss = tf.reduce_mean(loss)
            # print(loss)
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # w1 = w1 - lr * w1_grad
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])

        # Plain assignment would replace each tf.Variable with a Tensor
        # (breaking gradient tracking), hence assign_sub above:
        # w1 = w1 - lr * grads[0]
        # b1 = b1 - lr * grads[1]
        # w2 = w2 - lr * grads[2]
        # b2 = b2 - lr * grads[3]
        # w3 = w3 - lr * grads[4]
        # b3 = b3 - lr * grads[5]

        if step % 100 == 0:
            print(epoch, step, 'loss:', float(loss))
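
# A hedged sketch of evaluating the learned weights (the test split is not
# loaded in this script, so this reuses the last training batch from the loop):
h1 = tf.nn.relu(x @ w1 + b1)
h2 = tf.nn.relu(h1 @ w2 + b2)
out = h2 @ w3 + b3
pred = tf.argmax(out, axis=1, output_type=tf.int32)
acc = tf.reduce_mean(tf.cast(tf.equal(pred, y), tf.float32))
print('last-batch accuracy:', float(acc))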

Merging and splitting

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# concat
a = tf.ones([4, 35, 8])
b = tf.ones([2, 35, 8])
c = tf.concat([a, b], axis=0)
print(c.shape)
a = tf.ones([4, 32, 8])
b = tf.ones([4, 3, 8])
print(tf.concat([a, b], axis=1).shape)

# stack
a = tf.ones([4, 35, 8])
b = tf.ones([4, 35, 8])
print(tf.concat([a, b], axis=-1).shape)
print(tf.stack([a, b], axis=0).shape)
print(tf.stack([a, b], axis=2).shape)
print(tf.stack([a, b], axis=3).shape)
c = tf.stack([a, b])
aa, bb = tf.unstack(c, axis=0)
print(c.shape)
print(aa.shape, bb.shape)
res = tf.unstack(c, axis=3)
print(res[1].shape)

# split
res = tf.split(c, axis=3, num_or_size_splits=2)
print(len(res))
print(res[0].shape)
res = tf.split(c, axis=3, num_or_size_splits=[2, 2, 4])
print(res[0].shape, res[1].shape, res[2].shape)

Data statistics

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# norm
a = tf.ones([2, 2])
print(tf.norm(a))
print(tf.sqrt(tf.reduce_sum(tf.square(a))))
a = tf.ones([4, 28, 28, 3])
print(tf.norm(a))
print(tf.sqrt(tf.reduce_sum(tf.square(a))))

b = tf.ones([2, 2])
print(tf.norm(b))
print(tf.norm(b, ord=2, axis=1))
print(tf.norm(b, ord=1))
print(tf.norm(b, ord=1, axis=0))
print(tf.norm(b, ord=1, axis=1))


# reduce_min/max/mean
a = tf.random.normal([4, 10])
print(tf.reduce_min(a), tf.reduce_max(a), tf.reduce_mean(a))
print(tf.reduce_min(a, axis=1), tf.reduce_max(a, axis=1), tf.reduce_mean(a, axis=1))


# argmax/argmin
a = tf.random.normal([4, 10])
print(tf.argmax(a).shape)
print(tf.argmax(a))
print(tf.argmin(a, axis=1).shape)


# equal
a = tf.constant([0, 2, 3, 2, 5])
b = tf.range(5)
res = tf.equal(a, b)
print(res)
print(tf.reduce_sum(tf.cast(res, dtype=tf.int32)))


# accuracy
a = tf.convert_to_tensor([[0.1, 0.2, 0.7], [0.9, 0.05, 0.05]])
pred = tf.cast(tf.argmax(a, axis=1), dtype=tf.int32)
print(pred)
y = tf.convert_to_tensor([2, 1])
print(y)
print(tf.equal(y, pred))
correct = tf.reduce_sum(tf.cast(tf.equal(y, pred), dtype=tf.int32))
print(correct)
print(correct/2)


# unique
a = tf.range(5)
print(tf.unique(a))
a = tf.constant([4, 2, 2, 4, 3])
print(tf.unique(a))
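# tf.unique also returns the index mapping, which can rebuild the original:
values, idx = tf.unique(a)
print(tf.gather(values, idx))  # reconstructs [4 2 2 4 3]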

Tensor sorting

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# sort/argsort
a = tf.random.shuffle(tf.range(5))
print(a)
print(tf.sort(a, direction='DESCENDING'))
print(tf.argsort(a, direction='DESCENDING'))
idx = tf.argsort(a, direction='DESCENDING')
print(tf.gather(a, idx))
a = tf.random.uniform([3, 3], maxval=10, dtype=tf.int32)
print(a)
print(tf.sort(a))
print(tf.sort(a, direction='DESCENDING'))
idx = tf.argsort(a)
print(idx)


# top_k
a = tf.convert_to_tensor([[4, 6, 8], [9, 4, 7], [4, 5, 1]])
res = tf.math.top_k(a, 2)
print(res.indices)
print(res.values)
# top_k
prob = tf.constant([[0.1, 0.2, 0.7], [0.2, 0.7, 0.1]])
target = tf.constant([2, 0])
k_b = tf.math.top_k(prob, 3).indices
print(k_b)
k_b = tf.transpose(k_b, [1, 0])
print(k_b)
target = tf.broadcast_to(target, [3, 2])
print(target)
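
# A hedged sketch of finishing the top-k accuracy computation started above
# (compare each of the k prediction rows against the broadcast targets):
correct = tf.cast(tf.equal(k_b, target), dtype=tf.float32)  # [3, 2]
for k in [1, 2, 3]:
    correct_k = tf.reduce_sum(correct[:k])
    print('top-%d acc:' % k, float(correct_k) / 2)  # 0.5, 1.0, 1.0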

Tensor padding and tiling

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# pad
a = tf.reshape(tf.range(9), [3, 3])
print(tf.pad(a, [[0, 0], [0, 0]]))
print(tf.pad(a, [[1, 0], [0, 0]]))
print(tf.pad(a, [[1, 1], [0, 0]]))
print(tf.pad(a, [[1, 1], [1, 0]]))
print(tf.pad(a, [[1, 1], [1, 1]]))


# tile
print(tf.tile(a, [1, 2]))
print(tf.tile(a, [2, 1]))
print(tf.tile(a, [2, 2]))

Tensor clipping

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# clip_by_value
a = tf.reshape(tf.range(9), [3, 3])
print(tf.maximum(a, 2))
print(tf.minimum(a, 8))
print(tf.clip_by_value(a, 2, 6))

# relu
a = a - 5
print(a)
print(tf.nn.relu(a))
print(tf.maximum(a, -1))

# clip_by_norm
a = tf.random.normal([2, 2], mean=10)
print(a)
print(tf.norm(a))
aa = tf.clip_by_norm(a, 15)
print(aa)
print(tf.norm(aa))

# clip_by_global_norm
# pseudocode:
# for g in grads:
#     print(tf.norm(g))
# grads, _ = tf.clip_by_global_norm(grads, 15)
# for g in grads:
#     print(tf.norm(g))
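# A runnable version of the pseudocode above, with stand-in gradients
# (the grads list here is made up for illustration):
grads = [tf.random.normal([3, 3]) * 10 for _ in range(2)]
print('global norm before:', float(tf.linalg.global_norm(grads)))
grads, global_norm = tf.clip_by_global_norm(grads, 15)
print('global norm after:', float(tf.linalg.global_norm(grads)))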

Advanced operations

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

a = tf.random.normal([3, 3])
mask = (a > 0)
print(mask)
print(tf.boolean_mask(a, mask))
indices = tf.where(mask)
print(indices)
print(tf.gather_nd(a, indices))

mask = tf.convert_to_tensor([[True, True, False], [True, False, False], [True, True, False]])
A = tf.ones([3, 3]) - 2
B = tf.zeros([3, 3]) + 9
print(tf.where(mask, A, B))  # where mask is True take from A, otherwise from B

# scatter_nd
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
shape = tf.constant([8])
print(shape)
print(tf.scatter_nd(indices, updates, shape))
shape = tf.constant([4, 4, 4])
updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]])
indices = tf.constant([[0], [2]])
print(tf.scatter_nd(indices, updates, shape))

# meshgrid/stack
y = tf.linspace(-2., 2., 5)
x = tf.linspace(-2., 2., 5)
point_x, point_y = tf.meshgrid(x, y)
print(point_x, point_y)
points = tf.stack([point_x, point_y], axis=2)
print(points)
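# A common use of the stacked grid is evaluating a function at every point;
# the function here is an arbitrary choice for illustration:
z = tf.reduce_sum(points ** 2, axis=2)  # z = x^2 + y^2 at each grid point
print(z.shape)  # (5, 5)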

Dataset loading

from tensorflow import keras
import tensorflow as tf

(x, y), (x_test, y_test) = keras.datasets.mnist.load_data()
(x, y), (x_test, y_test) = keras.datasets.cifar10.load_data()
print(x.shape, y.shape, x_test.shape, y_test.shape)
print(x.min(), x.max())
print(y[:4])
db = tf.data.Dataset.from_tensor_slices(x_test)
print(next(iter(db)).shape)
db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
print(next(iter(db))[0].shape, next(iter(db))[1].shape)
db = db.shuffle(10000)

def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32)/255.
    y = tf.cast(y, dtype=tf.int32)
    y = tf.one_hot(y, depth=10)
    return x, y

db2 = db.map(preprocess)
res = next(iter(db2))
print(res[0].shape, res[1].shape)
print(res[1][:2])

db3 = db2.batch(32)
res = next(iter(db3))
print(res[0].shape, res[1].shape)
db_iter = iter(db3)
# while 1:
#     next(db_iter)  # raises StopIteration once db3 is exhausted
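# A plain for loop avoids the StopIteration bookkeeping entirely:
for x_batch, y_batch in db3.take(2):  # take() limits output for the demo
    print(x_batch.shape, y_batch.shape)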
db4 = db3.repeat(2)
res_now = next(iter(db4))
print(res_now[0].shape, res_now[1].shape)

Fully connected layers

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

x = tf.random.normal([4, 784])
net = tf.keras.layers.Dense(512)
out = net(x)
print(out.shape)
print(net.kernel.shape, net.bias.shape)

net = tf.keras.layers.Dense(10)
print(net.get_weights())
print(net.weights)
net.build(input_shape=(None, 4))
print(net.kernel.shape, net.bias.shape)
net.build(input_shape=(None, 20))
print(net.kernel.shape, net.bias.shape)
net.build(input_shape=(2, 4))
print(net.kernel)

x = tf.random.normal([2, 3])
model = keras.Sequential([
    keras.layers.Dense(2, activation='relu'),
    keras.layers.Dense(2, activation='relu'),
    keras.layers.Dense(2)
])
model.build(input_shape=[None, 3])
model.summary()
print('!'*20)
for p in model.trainable_variables:
    print(p.name, p.shape)
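
# These trainable variables are what a GradientTape differentiates; a minimal
# sketch with a stand-in loss (the loss here is arbitrary, for illustration):
with tf.GradientTape() as tape:
    out = model(x)
    loss = tf.reduce_mean(tf.square(out))
grads = tape.gradient(loss, model.trainable_variables)
print([g.shape for g in grads])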

MSE and entropy

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

# MSE
y = tf.constant([1, 2, 3, 0, 2])
y = tf.one_hot(y, depth=4)
y = tf.cast(y, dtype=tf.float32)
out = tf.random.normal([5, 4])
print(y, out)
loss1 = tf.reduce_mean(tf.square(y-out))
loss2 = tf.square(tf.norm(y-out))/(5*4)
loss3 = tf.reduce_mean(tf.losses.MSE(y, out))
print(loss1, loss2, loss3)

# entropy (lottery example, in bits)
a = tf.fill([4], 0.25)
print(a)
print(a*tf.math.log(a)/tf.math.log(2.))
print(-tf.reduce_sum(a*tf.math.log(a)/tf.math.log(2.)))
a = tf.constant([0.1, 0.1, 0.1, 0.7])
print(-tf.reduce_sum(a*tf.math.log(a)/tf.math.log(2.)))
a = tf.constant([0.01, 0.01, 0.01, 0.97])
print(-tf.reduce_sum(a*tf.math.log(a)/tf.math.log(2.)))

# Categorical Cross Entropy
print(tf.losses.categorical_crossentropy([0, 1, 0, 0], [0.25, 0.25, 0.25, 0.25]))
print(tf.losses.categorical_crossentropy([0, 1, 0, 0], [0.1, 0.1, 0.7, 0.1]))
print(tf.losses.categorical_crossentropy([0, 1, 0, 0], [0.1, 0.7, 0.1, 0.1]))
print(tf.losses.categorical_crossentropy([0, 1, 0, 0], [0.01, 0.97, 0.01, 0.01]))
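# For numerical stability it is common to pass raw logits with from_logits=True
# instead of softmax probabilities; a short sketch (the values are assumptions):
logits = tf.constant([[2.0, 5.0, 1.0, 0.5]])
print(tf.losses.categorical_crossentropy([[0, 1, 0, 0]], logits, from_logits=True))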

Gradient

import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf

def himmelblau(x):
    return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2

x = np.arange(-6, 6, 0.1)
y = np.arange(-6, 6, 0.1)
print('x, y range:', x.shape, y.shape)
X, Y = np.meshgrid(x, y)
print('X, Y maps:', X.shape, Y.shape)
Z = himmelblau([X, Y])

fig = plt.figure('himmelblau')
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer Matplotlib
ax.plot_surface(X, Y, Z)
ax.view_init(60, -30)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()

x = tf.constant([-4., 0.])
for step in range(200):
    with tf.GradientTape() as tape:
        tape.watch([x])
        y = himmelblau(x)
    grads = tape.gradient(y, [x])[0]
    x -= 0.001*grads
    if step % 20 == 0:
        print('step {}: x = {} fx = {}'.format(step, x.numpy(), y.numpy()))