CIN_layer


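The code below implements a single cross layer of the CIN (Compressed Interaction Network) from xDeepFM (Lian et al., 2018). With X^0 denoting the original input (m feature fields, embedding dimension D) and X^l the output of the l-th layer (h rows), one cross layer computes, for each of the n_filters output rows k,

$$X^{l+1}_{k,*} = \sum_{i=1}^{h} \sum_{j=1}^{m} W^{k}_{ij}\,\bigl(X^{l}_{i,*} \circ X^{0}_{j,*}\bigr)$$

where ∘ is the element-wise (Hadamard) product; each h × m weight matrix W^k corresponds to one filter of the 1×1 convolution used below.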

#coding=utf-8

import tensorflow as tf
from tensorflow.python.keras.layers import Concatenate,Conv1D,Reshape

## Computes the result of only a single cross layer
def compressed_interaction_net(x0, xl, D, n_filters):
    """
    @param x0: 原始输入
    @param xl: 第l层的输入
    @param D: embedding dim
    @param n_filters: 压缩网络filter的数量
    """
    # Assume x0 contains m feature fields and xl contains h feature fields.
    # 1. Split along the embedding dimension into D slices of shape (batch_size, m, 1) / (batch_size, h, 1)
    x0_cols = tf.split(x0, D, axis=-1)  # x0 shape: (batch_size, m, D)
    xl_cols = tf.split(xl, D, axis=-1)  # xl shape: (batch_size, h, D)

    assert len(x0_cols) == len(xl_cols), "error shape!"

    # 2. For each of the D columns, take the outer product of the i-th column of xl and x0 and collect it in feature_maps
    feature_maps = []
    for i in range(D):
        feature_map = tf.matmul(xl_cols[i], x0_cols[i], transpose_b=True)  # outer product: ?, h, m
        feature_map = tf.expand_dims(feature_map, axis=-1)  # ?, h, m, 1
        feature_maps.append(feature_map)

    # 3. Concatenate into a three-dimensional tensor of shape h × m × D
    feature_maps = Concatenate(axis=-1)(feature_maps)  # ?, h, m, D

    # 4. Compression network
    x0_n_feats = x0.get_shape()[1]  # m
    xl_n_feats = xl.get_shape()[1]  # h

    reshaped_feature_maps = Reshape(target_shape=(x0_n_feats * xl_n_feats, D))(feature_maps)  # ?, h*m, D
    transposed_feature_maps = tf.transpose(reshaped_feature_maps, [0, 2, 1])  # ?, D, h*m

    # Conv1D: n_filters kernels of size 1 slide along the embedding dimension D with stride 1.
    # At each of the D positions every filter computes a weighted sum over the h*m channels,
    # so the output has shape ?, D, n_filters. Conv1D here acts like a per-position weighted average.
    new_feature_maps = Conv1D(n_filters, kernel_size=1, strides=1)(transposed_feature_maps)  # ?, D, n_filters

    # Transpose so that the last dimension is the embedding dimension D again
    new_feature_maps = tf.transpose(new_feature_maps, [0, 2, 1])  # ?, n_filters, D

    return new_feature_maps
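
## A minimal sketch (not in the original post) of stacking several of these
## cross layers into a full CIN, following the xDeepFM paper: each layer's
## output feeds the next, every layer is sum-pooled over the embedding
## dimension, and the pooled vectors are concatenated. `layer_sizes` is an
## illustrative choice of per-layer filter counts.
def cin_block(x0, D, layer_sizes=(4, 4)):
    xl = x0
    pooled_outputs = []
    for n_filters in layer_sizes:
        xl = compressed_interaction_net(x0, xl, D, n_filters)  # ?, n_filters, D
        pooled_outputs.append(tf.reduce_sum(xl, axis=-1))      # ?, n_filters
    return Concatenate(axis=-1)(pooled_outputs)                # ?, sum(layer_sizes)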

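## Variant: stack the D feature maps first, then transpose and reshape once
## before the compression convolution; the logic is otherwise identical to v1.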
def compressed_interaction_net_v2(x0, xl, D, n_filters):
    x0_cols = tf.split(x0, D, axis=-1)  # x0 shape: (batch_size, m, D)
    xl_cols = tf.split(xl, D, axis=-1)  # xl shape: (batch_size, h, D)

    # Collect the D outer products, each of shape (batch_size, h, m)
    feature_map_list = []
    for i in range(D):
        feature_map = tf.matmul(xl_cols[i], x0_cols[i], transpose_b=True)
        feature_map_list.append(feature_map)  # (batch_size, h, m)

    feature_map_tensor = tf.convert_to_tensor(feature_map_list)  # stacks the list into shape (D, batch_size, h, m)
    transposed_feature_map_tensor = tf.transpose(feature_map_tensor, [1, 0, 2, 3])  # ?, D, h, m

    x0_n_feats = x0.get_shape()[1]  # m
    xl_n_feats = xl.get_shape()[1]  # h

    reshaped_feature_maps = \
        Reshape(target_shape=(D, x0_n_feats * xl_n_feats))(transposed_feature_map_tensor)  #  ?, D,  h*m
    ### Note: Conv1D is randomly initialized each time it is created, so the same logic yields different numbers on each run
    new_feature_maps = Conv1D(n_filters, kernel_size=1, strides=1)(reshaped_feature_maps)  # ?, D, n_filters
    new_feature_maps = tf.transpose(new_feature_maps, [0, 2, 1])  # ?, n_filters, D

    return new_feature_maps
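
## For comparison, a sketch (assumed, not from the original post) that forms
## all D outer products at once with tf.einsum instead of a Python loop; it
## follows the same input conventions as the two versions above.
def compressed_interaction_net_einsum(x0, xl, D, n_filters):
    # bhd,bmd->bdhm: for each embedding column d, the outer product of
    # xl[:, :, d] (length h) and x0[:, :, d] (length m) -> (batch_size, D, h, m)
    feature_maps = tf.einsum('bhd,bmd->bdhm', xl, x0)
    m = x0.get_shape()[1]
    h = xl.get_shape()[1]
    reshaped = Reshape(target_shape=(D, h * m))(feature_maps)      # ?, D, h*m
    new_feature_maps = Conv1D(n_filters, kernel_size=1)(reshaped)  # ?, D, n_filters
    return tf.transpose(new_feature_maps, [0, 2, 1])               # ?, n_filters, D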

if __name__ == '__main__':

    ## (batch_size, feature_num, embedding_size)
    x0 = tf.constant([
        [[1, 2, 3, 4],
         [2, 3, 4, 5],
         [3, 4, 5, 3]],

        [[5, 6, 7, 1],
         [8, 9, 10, 11],
         [11, 12, 13, 14]]
    ], dtype=tf.float32)

    xl = tf.constant([
        [[1, 2, 3, 7],
         [2, 3, 4, 9]],
        [[1, 2, 3, 4],
         [2, 3, 4, 6]]
    ], dtype=tf.float32)

    res_v1 = compressed_interaction_net(x0, xl, 4, 3)
    print("res_v1:", res_v1)
    
    res_v2 = compressed_interaction_net_v2(x0, xl, 4, 3)
    print("res_v2:",res_v2)
