TensorFlow 2: host memory gradually fills up inside a call loop

A gradient-based iterative attack on MeshWalker has to compute the gradient of the loss with respect to the input in a loop, but every iteration increases memory usage (host RAM, not GPU memory) until memory is exhausted and the process is killed.

Every `step` iteration adds a little to the memory footprint.

Suspected cause: in TensorFlow, every operation (add, subtract, multiply, divide) is recorded as a computation node, and each loop iteration creates new nodes, so memory keeps growing.
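To confirm that the growth really is per-iteration, it helps to log the process's resident memory every step. A minimal sketch, assuming the psutil package is available (any way of reading the process RSS works just as well):

import os
import psutil

proc = psutil.Process(os.getpid())
for step in range(steps):
    ...  # one attack iteration as in the code below
    rss_mb = proc.memory_info().rss / 1024 ** 2
    print('step {}: RSS = {:.1f} MiB'.format(step, rss_mb))  # climbs steadily if the loop leaks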

Attempted fix: clear the computation nodes once per iteration with:
tf.compat.v1.reset_default_graph()
tf.keras.backend.clear_session()
Neither call has any effect. A possible explanation: in eager mode these operations are never added to the default graph, so there is nothing for the two calls to clear. A frequently reported culprit for this pattern is instead the `tf.Variable` objects created inside the loop body (the loss accumulators below, and the variables passed into generate_walk); each call allocates a new resource that neither of the two calls above releases.
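A workaround that targets those allocations directly: accumulate the loss terms as plain tensors instead of fresh tf.Variable objects, build long-lived objects (the loss function, the optimizer) once outside the loop, drop the tape explicitly after each step, and optionally force a garbage-collection pass. A minimal sketch of the pattern, not the full attack:

import gc
import tensorflow as tf

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()  # create once, not per step

for step in range(steps):
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(vertices)
        kappa_loss = tf.constant(0., dtype=tf.float64)  # tensor accumulator, no new Variable
        # ... accumulate the loss terms as in the code below,
        # e.g. kappa_loss = kappa_loss + term ...
        adv_loss = ...  # weighted sum, as below
    gradient = tape.gradient(adv_loss, vertices)
    optimizer.apply_gradients([(gradient, vertices)])
    del tape      # release the tape's references to the recorded ops
    gc.collect()  # optional: reclaim per-step garbage immediately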

  for step in range(steps):

    with tf.GradientTape(watch_accessed_variables=False) as tape:
        # watch the input vertices so gradients w.r.t. them can be taken
        tape.watch(vertices)

        L2_dis_loss = tf.norm(begin_vs - vertices, ord=2)
        # Hausdorff distance is computed on NumPy copies, so it carries no gradient (logging only)
        HD = hausdorff_distance(begin_vs.numpy(), vertices.numpy(), distance='euclidean')
        CD = 0  # Chamfer distance placeholder (unused)

        # a fresh tf.Variable is allocated here on every step; a suspected source of the memory growth
        kappa_loss = tf.Variable(0, dtype=tf.float64)

        # curvature (kappa) loss; the benign-side value per vertex is precomputed
        # into benign_one_points_kappa_seq with the same formula as below
        for i in range(252):  # iterate over the mesh vertices

            adv_one_points_kappa = tf.Variable(0, dtype=tf.float64)  # another per-step Variable allocation
            for j in trimesh_file.vertex_neighbors[i]:
                # unit vector from vertex i towards neighbor j, weighted element-wise by the vertex normal
                vectors = vertices[j] - vertices[i]
                vectors = vectors / tf.norm(vectors, ord=2)
                adv_one_points_kappa = adv_one_points_kappa + vectors * trimesh_file.vertex_normals[i]

            kappa_loss = kappa_loss + tf.norm(benign_one_points_kappa_seq[i] - adv_one_points_kappa, ord=2)

        ###########################################
        # local (lod) curvature loss
        lod_cur_loss = com_lod_cur_loss(vertices, faces, faces_sequenc_index_sqe, point_edge_double_se_index_sqe, begin_one_point_gurvatur_sqe)

        # reference implementation of com_lod_cur_loss, kept for documentation:
        # lod_cur_loss = tf.Variable(0, dtype=tf.float64)
        # for i in range(test_num_v):
        #     area_sum = tf.Variable(0, dtype=tf.float64)
        #     for faces_index in faces_sequenc_index_sqe[i]:
        #         point1 = vertices[faces[faces_index][0]]
        #         point2 = vertices[faces[faces_index][1]]
        #         point3 = vertices[faces[faces_index][2]]
        #         # accumulate the area of the faces incident to vertex i
        #         area_sum = area_sum + calculate_area(point1, point2, point3)
        #     # accumulate the angles at vertex i
        #     angle_sum = tf.Variable(0, dtype=tf.float64)
        #     for point_edge_index_iter_double in point_edge_double_se_index_sqe[i]:
        #         edge1 = vertices[point_edge_index_iter_double[0][0]] - vertices[point_edge_index_iter_double[0][1]]
        #         edge2 = vertices[point_edge_index_iter_double[1][0]] - vertices[point_edge_index_iter_double[1][1]]
        #         angle_sum = angle_sum + calculate_angle(edge1, edge2)
        #     adv_one_point_gurvatur = tf.norm(tf.Variable(360, dtype=tf.float64) - angle_sum, ord=2) / area_sum
        #
        #     lod_cur_loss = lod_cur_loss + tf.norm(begin_one_point_gurvatur_sqe[i] - adv_one_point_gurvatur, ord=2)


        # edge feature loss
        edges_loss = tf.Variable(0, dtype=tf.float64)  # per-step Variable allocation again

        for i in range(len(edges)):
            benign_one_edges_vector = begin_vs[edges[i][0]] - begin_vs[edges[i][1]]
            adv_one_edges_vector = vertices[edges[i][0]] - vertices[edges[i][1]]
            benign_one_edges_len = tf.norm(benign_one_edges_vector, ord=2)
            adv_one_edges_len = tf.norm(adv_one_edges_vector, ord=2)

            # the cross-product norm penalizes rotation of the edge, the length term penalizes stretching
            one_edges_angle_loss = tf.norm(tf.linalg.cross(benign_one_edges_vector, adv_one_edges_vector), ord=2)
            one_edges_len_loss = tf.norm(benign_one_edges_len - adv_one_edges_len, ord=2)

            edges_loss = edges_loss + (one_edges_angle_loss + one_edges_len_loss)
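        # A vectorized alternative (a sketch, assuming `edges` converts cleanly to an
        # (E, 2) int tensor); it replaces the Python loop with a handful of tensor ops,
        # and the face loop below can be rewritten the same way:
        #   e = tf.constant(edges, dtype=tf.int32)
        #   benign_vec = tf.gather(begin_vs, e[:, 0]) - tf.gather(begin_vs, e[:, 1])
        #   adv_vec = tf.gather(vertices, e[:, 0]) - tf.gather(vertices, e[:, 1])
        #   angle_term = tf.norm(tf.linalg.cross(benign_vec, adv_vec), axis=-1)
        #   len_term = tf.abs(tf.norm(benign_vec, axis=-1) - tf.norm(adv_vec, axis=-1))
        #   edges_loss = tf.reduce_sum(angle_term + len_term)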

        # face feature loss
        normal_angle_loss = tf.Variable(0, dtype=tf.float64)
        center_position_loss = tf.Variable(0, dtype=tf.float64)
        for i in range(len(faces)):
            # face normal = cross product of two edge vectors
            benign_one_faces_normal = tf.linalg.cross((begin_vs[faces[i][0]] - begin_vs[faces[i][1]]), (begin_vs[faces[i][0]] - begin_vs[faces[i][2]]))
            adv_one_faces_normal = tf.linalg.cross((vertices[faces[i][0]] - vertices[faces[i][1]]), (vertices[faces[i][0]] - vertices[faces[i][2]]))
            one_normal_angle_loss = tf.norm(tf.linalg.cross(benign_one_faces_normal, adv_one_faces_normal), ord=2)
            normal_angle_loss = normal_angle_loss + one_normal_angle_loss

            benign_one_faces_center = (begin_vs[faces[i][0]] + begin_vs[faces[i][1]] + begin_vs[faces[i][2]]) / 3
            adv_one_faces_center = (vertices[faces[i][0]] + vertices[faces[i][1]] + vertices[faces[i][2]]) / 3
            one_center_position_loss = tf.norm(benign_one_faces_center - adv_one_faces_center, ord=2)
            center_position_loss = center_position_loss + one_center_position_loss
        faces_loss = normal_angle_loss + center_position_loss

        # classification loss
        # (two more per-step tf.Variable allocations are hidden in this call)
        name, ftrs, gt = generate_walk(fn, vertices, faces, edges, tf.Variable(data['label']), tf.Variable([params_idx]), dataset_params_list)
        predictions = dnn_model(ftrs, classify=True, training=False)
        mean_pred = tf.reduce_mean(predictions, axis=0)
        max_hit = np.argmax(mean_pred)  # predicted class
        loss = tf.keras.losses.SparseCategoricalCrossentropy()  # could be built once outside the loop
        attack_loss = loss(Target, mean_pred)

        attack_loss = tf.cast(attack_loss, dtype=tf.float64)
        adv_loss = attack_weight * attack_loss + face_weight * faces_loss + edge_weight * edges_loss + cur_weight * (kappa_loss + lod_cur_loss)
        print('adv_loss:{}, attack_loss:{}, faces_loss:{}, edge_loss:{}, kappa_loss:{}, lod_cur_loss:{}, L2_loss:{}, pred:{}'.format(
            adv_loss, attack_loss, faces_loss, edges_loss, kappa_loss, lod_cur_loss, L2_dis_loss, max_hit))
        if max_hit == Target:
            is_attack_success = 1


    if is_attack_success == 0:
        # gradient of the full objective w.r.t. the input vertices
        gradient = tape.gradient(adv_loss, vertices)
        # replace NaN entries with zeros of the matching dtype before the update
        gradient = tf.where(tf.math.is_nan(gradient), tf.zeros_like(gradient), gradient)
        optimizer.apply_gradients(grads_and_vars=[(gradient, vertices)])
        # clearing once per step was also tried here; it has no effect on the leak
        tf.compat.v1.reset_default_graph()
        tf.keras.backend.clear_session()
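Another direction worth trying: move the purely-TensorFlow part of the step into a function decorated with @tf.function, so the ops are traced once and reused instead of being rebuilt every iteration. A useful side effect is that tf.function raises an error if new tf.Variable objects are created on every call, which points straight at the leaky allocations. The NumPy-dependent pieces (hausdorff_distance, the .numpy() calls) have to stay outside the traced function. A minimal sketch, where compute_adv_loss is a hypothetical function wrapping the loss assembly above:

import tensorflow as tf

@tf.function
def attack_step(vertices):
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(vertices)
        adv_loss = compute_adv_loss(vertices)  # hypothetical: the TF-only loss terms above
    gradient = tape.gradient(adv_loss, vertices)
    # zero out NaN entries, keeping the dtype consistent
    return tf.where(tf.math.is_nan(gradient), tf.zeros_like(gradient), gradient)

for step in range(steps):
    gradient = attack_step(vertices)
    optimizer.apply_gradients([(gradient, vertices)])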
