Custom Networks in TensorFlow

1. keras.Sequential

  • A container: you declare the layers inside it, along with how they are connected and their activation functions (see the example and the sketch that follows)
from tensorflow.keras import Sequential, layers

network = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(128, activation='relu'),
                      layers.Dense(64, activation='relu'),
                      layers.Dense(32, activation='relu'),
                      layers.Dense(10)])
# network.build(input_shape=(None, 28*28)) creates the parameters that need to be updated
# alternatively, calling network(x) on a batch also creates them
network.build(input_shape=(None, 28*28))
network.summary()
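
As a quick check, here is a minimal sketch of the alternative mentioned in the comment above: feeding a batch through the network once also creates the trainable parameters (the small two-layer network and the dummy tensor x below are illustrative assumptions, not part of the original example).

import tensorflow as tf
from tensorflow.keras import Sequential, layers

network = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(10)])
x = tf.random.normal([4, 28*28])        # hypothetical dummy batch of 4 flattened images
out = network(x)                        # lazily builds the weights; out has shape [4, 10]
for v in network.trainable_variables:   # kernels and biases of both Dense layers
    print(v.name, v.shape)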

2. keras.layers.Layer

  • Initialize first (preprocessing, in __init__), then compute (in call)
  • layers.Layer is the parent class that provides the basic functionality; every custom layer class must inherit from it
  • The custom class calls the parent's initializer inside __init__ and adds its own logic inside call; a usage sketch follows the class definition below
import tensorflow as tf
from tensorflow.keras import layers

class MyDense(layers.Layer):
    def __init__(self, inp_dim, outp_dim):
        super(MyDense, self).__init__()
        # add_weight registers the variables so Keras tracks and updates them
        self.kernel = self.add_weight('w', [inp_dim, outp_dim])
        self.bias = self.add_weight('b', [outp_dim])
    def call(self, inputs, training=None):
        out = inputs @ self.kernel + self.bias
        return out
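
A minimal usage sketch, continuing from the class above (the 784/10 sizes and the dummy batch are illustrative assumptions): the custom layer is called like any built-in layer, and the weights created with add_weight are tracked automatically.

layer = MyDense(784, 10)                  # inp_dim=784, outp_dim=10 (illustrative)
x = tf.random.normal([4, 784])            # hypothetical dummy batch
out = layer(x)                            # invokes call(); out.shape == [4, 10]
print([v.shape for v in layer.trainable_variables])   # shapes: (784, 10) and (10,)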

3. keras.Model

  • keras.Model provides compile/fit/evaluate/predict (Keras's high-level training APIs)
  • Sometimes we need to add some special layers to a network (a model); these special layers differ from the standard ones, so they are declared by subclassing keras.layers.Layer, while the model that combines them is declared by subclassing keras.Model, as shown below
from tensorflow import keras

class MyModel(keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        # five custom fully-connected layers: 784 -> 256 -> 128 -> 64 -> 32 -> 10
        self.fc1 = MyDense(28*28, 256)
        self.fc2 = MyDense(256, 128)
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)
    def call(self, inputs, training=None):
        # each hidden layer feeds the next one, with ReLU in between
        x = self.fc1(inputs)
        x = tf.nn.relu(x)
        x = self.fc2(x)
        x = tf.nn.relu(x)
        x = self.fc3(x)
        x = tf.nn.relu(x)
        x = self.fc4(x)
        x = tf.nn.relu(x)
        x = self.fc5(x)
        return x
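
Since MyModel inherits from keras.Model, it can be built and inspected just like the Sequential container earlier; a minimal sketch (the dummy input batch is an assumption):

model = MyModel()
out = model(tf.random.normal([4, 28*28]))   # one forward pass builds/traces the model
model.summary()                             # available because keras.Model is the base class
print(out.shape)                            # (4, 10)
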
  1. Hands-on example
import tensorflow as tf
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
from tensorflow import keras

def preprocess(x, y):
    """
    x is a single image, not a batch
    """
    x = tf.cast(x, dtype=tf.float32) / 255.
    x = tf.reshape(x, [28*28])
    y = tf.cast(y, dtype=tf.int32)
    y = tf.one_hot(y, depth=10)
    return x,y


batchsz = 128
(x, y), (x_val, y_val) = datasets.mnist.load_data()
print('datasets:', x.shape, y.shape, x.min(), x.max())



db = tf.data.Dataset.from_tensor_slices((x,y))
db = db.map(preprocess).shuffle(60000).batch(batchsz)
ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
ds_val = ds_val.map(preprocess).batch(batchsz) 

sample = next(iter(db))
print(sample[0].shape, sample[1].shape)


network = Sequential([layers.Dense(256, activation='relu'),
                     layers.Dense(128, activation='relu'),
                     layers.Dense(64, activation='relu'),
                     layers.Dense(32, activation='relu'),
                     layers.Dense(10)])
network.build(input_shape=(None, 28*28))
network.summary()


class MyDense(layers.Layer):

	def __init__(self, inp_dim, outp_dim):
		super(MyDense, self).__init__()

		self.kernel = self.add_weight('w', [inp_dim, outp_dim])
		self.bias = self.add_weight('b', [outp_dim])

	def call(self, inputs, training=None):

		out = inputs @ self.kernel + self.bias

		return out 

class MyModel(keras.Model):

	def __init__(self):
		super(MyModel, self).__init__()

		self.fc1 = MyDense(28*28, 256)
		self.fc2 = MyDense(256, 128)
		self.fc3 = MyDense(128, 64)
		self.fc4 = MyDense(64, 32)
		self.fc5 = MyDense(32, 10)

	def call(self, inputs, training=None):

		x = self.fc1(inputs)
		x = tf.nn.relu(x)
		x = self.fc2(x)
		x = tf.nn.relu(x)
		x = self.fc3(x)
		x = tf.nn.relu(x)
		x = self.fc4(x)
		x = tf.nn.relu(x)
		x = self.fc5(x) 

		return x


network = MyModel()


network.compile(optimizer=optimizers.Adam(learning_rate=0.01),
		loss=tf.losses.CategoricalCrossentropy(from_logits=True),
		metrics=['accuracy']
	)

network.fit(db, epochs=5, validation_data=ds_val,
              validation_freq=2)
 
network.evaluate(ds_val)

sample = next(iter(ds_val))
x = sample[0]
y = sample[1] # one-hot
pred = network.predict(x) # [b, 10]
# convert back to number 
y = tf.argmax(y, axis=1)
pred = tf.argmax(pred, axis=1)

print(pred)
print(y)
