Deep Learning Notes 5: Building a Neural Network with TensorFlow

import h5py
import numpy as np
import tensorflow as tf

def load_dataset():
    # Load training images, labels, and the class list from the HDF5 file
    with h5py.File('./dataset4/train.h5', 'r') as f:
        classes = np.array(f['list_classes'])
        x_train = np.array(f['train_set_x'])
        y_train = np.array(f['train_set_y'])

    # Load test images and labels
    with h5py.File('./dataset4/test.h5', 'r') as f:
        x_test = np.array(f['test_set_x'])
        y_test = np.array(f['test_set_y'])

    return x_train, y_train, x_test, y_test, classes
def convert2onehot(y, c):
    # np.eye(c) is the c x c identity matrix; indexing its rows with y
    # picks out one-hot rows, and the transpose gives shape (c, m)
    return np.eye(c)[y].T
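To sanity-check the encoding, here is a quick sketch with three made-up labels (not part of the original dataset); each column of the result is one example's one-hot vector:

y_demo = np.array([0, 2, 1])
print(convert2onehot(y_demo, 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]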
# Load the dataset
x_train_orig, y_train_orig, x_test_orig, y_test_orig, classes = load_dataset()
# Flatten each image into a column vector
x_train_flatten = x_train_orig.reshape(x_train_orig.shape[0], -1).T
x_test_flatten = x_test_orig.reshape(x_test_orig.shape[0], -1).T
# Normalize pixel values to [0, 1]
x_train = x_train_flatten/255.
x_test = x_test_flatten/255.
# Convert labels to one-hot encoding
y_train = convert2onehot(y_train_orig, classes.size)
y_test = convert2onehot(y_test_orig, classes.size)
print ("number of training examples = " + str(x_train.shape[1]))
print ("number of test examples = " + str(x_test.shape[1]))
print ("X_train shape: " + str(x_train.shape))
print ("Y_train shape: " + str(y_train.shape))
print ("X_test shape: " + str(x_test.shape))
print ("Y_test shape: " + str(y_test.shape))
def initialize_parameters():
    tf.set_random_seed(1)
    # On the difference between tf.get_variable() and tf.Variable():
    # https://blog.csdn.net/qq_33915826/article/details/79793171

    # tf.contrib.layers.variance_scaling_initializer()
    # ReLU pairs best with variance_scaling_initializer (He initialization);
    # tanh and sigmoid pair best with xavier_initializer
    # https://zhuanlan.zhihu.com/p/45199737
    w1 = tf.get_variable("w1", [100, 12288], initializer = tf.contrib.layers.variance_scaling_initializer(seed = 1))
    b1 = tf.get_variable("b1", [100, 1], initializer = tf.zeros_initializer())
    w2 = tf.get_variable("w2", [50, 100], initializer = tf.contrib.layers.variance_scaling_initializer(seed = 1))
    b2 = tf.get_variable("b2", [50, 1], initializer = tf.zeros_initializer())
    w3 = tf.get_variable("w3", [6, 50], initializer = tf.contrib.layers.variance_scaling_initializer(seed = 1))
    b3 = tf.get_variable("b3", [6, 1], initializer = tf.zeros_initializer())
    
    params = {
        'w1': w1,
        'b1': b1,
        'w2': w2,
        'b2': b2,
        'w3': w3,
        'b3': b3,
    }
    
    return params
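As a quick illustration of the initializer note above, here is a sketch I added (the variable names are illustrative only): on a square layer, He draws should show roughly sqrt(2) times the standard deviation of Xavier draws, since He scales the variance by 2/fan_in while Xavier uses 2/(fan_in + fan_out).

# Sketch: compare He (variance_scaling) and Xavier draws on a square layer
tf.reset_default_graph()
he = tf.get_variable("he_demo", [100, 100],
                     initializer = tf.contrib.layers.variance_scaling_initializer(seed = 1))
xavier = tf.get_variable("xavier_demo", [100, 100],
                         initializer = tf.contrib.layers.xavier_initializer(seed = 1))

def empirical_std(t):
    return tf.sqrt(tf.reduce_mean(tf.square(t - tf.reduce_mean(t))))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Expect roughly 0.14 vs 0.10, i.e. about sqrt(2/100) vs sqrt(2/200)
    print(sess.run([empirical_std(he), empirical_std(xavier)]))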
def forward_propagation(x, params):
    w1 = params['w1']
    b1 = params['b1']
    w2 = params['w2']
    b2 = params['b2']
    w3 = params['w3']
    b3 = params['b3']

    # Two hidden layers with ReLU activations; the final linear layer
    # outputs logits (softmax is applied later inside the cost function)
    z1 = tf.add(tf.matmul(w1, x), b1)
    a1 = tf.nn.relu(z1)
    z2 = tf.add(tf.matmul(w2, a1), b2)
    a2 = tf.nn.relu(z2)
    z3 = tf.add(tf.matmul(w3, a2), b3)
    return z3
def compute_cost(z3, y):
    # Logits from forward propagation, transposed to shape (m, classes)
    logits = tf.transpose(z3)
    # Ground-truth labels, transposed the same way
    labels = tf.transpose(y)
    # softmax_cross_entropy_with_logits applies softmax to the logits and then
    # computes the cross-entropy loss against the labels;
    # reduce_mean averages over the batch (a dimension can also be specified)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = labels))
    return cost
def create_placeholders(n_x, n_y):
    # A placeholder fixes a tensor's dtype and shape without holding data;
    # the None dimension leaves the batch size flexible for mini-batches
    x = tf.placeholder(tf.float32, shape=(n_x, None))
    y = tf.placeholder(tf.float32, shape=(n_y, None))
    return x, y
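With these building blocks defined, a minimal shape check (my own sketch, assuming the 12288-feature, 6-class setup used in this post) confirms that the graph wires together:

# Sketch: static shape inference is enough here, no Session needed
tf.reset_default_graph()
x, y = create_placeholders(12288, 6)
params = initialize_parameters()
z3 = forward_propagation(x, params)
cost = compute_cost(z3, y)
print(z3.shape)    # (6, ?)
print(cost.shape)  # ()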
def random_mini_batches(x, y, mini_batch_size = 64, seed = 0):
    # Number of examples
    m = x.shape[1]
    # List of mini-batches
    mini_batches = []
    # Set the seed
    np.random.seed(seed)

    # Shuffle the examples
    permutation = list(np.random.permutation(m))
    shuffled_x = x[:, permutation]
    shuffled_y = y[:, permutation]

    # Partition the shuffled set
    num_minibatches = int(m/mini_batch_size) # number of full mini-batches of size mini_batch_size in the partitioning
    for k in range(num_minibatches):
        mini_batch_x = shuffled_x[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        mini_batch_y = shuffled_y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        mini_batch = (mini_batch_x, mini_batch_y)
        mini_batches.append(mini_batch)

    # Handle the leftover examples, if any
    if m % mini_batch_size != 0:
        mini_batch_x = shuffled_x[:, num_minibatches * mini_batch_size : m]
        mini_batch_y = shuffled_y[:, num_minibatches * mini_batch_size : m]
        mini_batch = (mini_batch_x, mini_batch_y)
        mini_batches.append(mini_batch)

    return mini_batches
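A toy run (a sketch with made-up shapes, not real data) shows the splitting behavior, including the smaller leftover batch:

# Sketch: 10 examples with batch size 4 -> two full batches plus a leftover of 2
x_toy = np.arange(30).reshape(3, 10)      # 3 features, 10 examples
y_toy = np.eye(2)[:, np.arange(10) % 2]   # alternating one-hot labels, shape (2, 10)
batches = random_mini_batches(x_toy, y_toy, mini_batch_size=4, seed=0)
print([bx.shape for bx, by in batches])
# [(3, 4), (3, 4), (3, 2)]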
def model(x_train, y_train, x_test, y_test, learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 64):
    # Reset the default graph
    tf.reset_default_graph()
    tf.set_random_seed(1)
    seed = 3
    (n_x, m) = x_train.shape
    n_y = y_train.shape[0]
    costs = []

    # Create placeholders so mini-batches can be fed in later
    x, y = create_placeholders(n_x, n_y)
    # Initialize the parameters
    params = initialize_parameters()
    # Forward propagation
    z3 = forward_propagation(x, params)
    # Cost function
    cost = compute_cost(z3, y)
    # Backward propagation: define the optimizer
    optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
    # Variable initializer
    init = tf.global_variables_initializer()
    # Start a session to run the TensorFlow graph
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            # Cost for this epoch
            epoch_cost = 0.
            # Number of full mini-batches per epoch
            num_minibatches = int(m / minibatch_size)
            # Increment the seed so each epoch gets a different shuffle
            seed = seed + 1
            # Partition the training set
            minibatches = random_mini_batches(x_train, y_train, minibatch_size, seed)
            for minibatch in minibatches:
                (minibatch_x, minibatch_y) = minibatch
                # Feed the mini-batch via feed_dict and run one optimization step
                _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={x: minibatch_x, y: minibatch_y})
                # Accumulate the averaged mini-batch costs
                epoch_cost += minibatch_cost / num_minibatches
            if epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if epoch % 5 == 0:
                costs.append(epoch_cost)

        # parameters = sess.run(params)
        # Compute the accuracy
        # tf.argmax returns the index of the largest value along a dimension
        correct_prediction = tf.equal(tf.argmax(z3), tf.argmax(y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print ("Train Accuracy:", accuracy.eval({x: x_train, y: y_train}))
        print ("Test Accuracy:", accuracy.eval({x: x_test, y: y_test}))
        # return parameters
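Training then kicks off with a single call; the original post does not show the call itself, but with the functions above it would simply be:

# Train on the prepared data; hyperparameters fall back to the defaults in the signature
model(x_train, y_train, x_test, y_test)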