Keras custom layer - AttributeError: 'Tensor' object has no attribute '_keras_history'

So, big picture: I'm trying to make a Keras word2vec auto-encoder. I tried to follow the CustomVariationalLayer class from this official example.

My class is this:

from keras import backend as K
from keras.engine.topology import Layer


class custom_ae_layer(Layer):
    """Custom Keras layer to handle looking up wv inputs.

    Example from https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder.py
    """

    def __init__(self, **kwargs):
        self.is_placeholder = True
        super(custom_ae_layer, self).__init__(**kwargs)

    def ae_loss(self, reconstruction, emb_lookup):
        # mean over the batch of the summed per-dimension difference
        loss = K.sum(emb_lookup - reconstruction, axis=-1)
        return K.mean(loss)

    def call(self, inputs):
        reconstruction = inputs[1]
        emb_lookup = inputs[0]
        # register the reconstruction loss on the layer rather than in compile()
        loss = self.ae_loss(emb_lookup, reconstruction)
        self.add_loss(loss)
        return emb_lookup
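For reference, the linked official example wires its loss-only layer up like this (a minimal sketch of that example's usage, where x is the model input and x_decoded_mean the reconstruction tensor):

y = CustomVariationalLayer()([x, x_decoded_mean])
vae = Model(x, y)
# the layer contributes the loss via add_loss, so compile with loss=None
vae.compile(optimizer='rmsprop', loss=None)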

The error occurs regardless of whether I return emb_lookup or reconstruction. The major difference between my layer and the official example is that I use an embedding lookup as an input (the output of a keras.layers.Embedding layer), while reconstruction is

recon_layer = Dense(outshape, activation="tanh", kernel_regularizer=l2(in_args.l2_rate))(deconv_input)
s_recon_layer = K.squeeze(recon_layer, 2)

The full error message is:

Traceback (most recent call last):
  File "semi_sup_cnn_big_data_test.py", line 166, in <module>
    main()
  File "semi_sup_cnn_big_data_test.py", line 84, in main
    args,run_time,micro,macro = basic_cnn_train_val_test(args)
  File "semi_sup_cnn_big_data_test.py", line 100, in basic_cnn_train_val_test
    clf,args = init_export_network(args)
  File "/home/qqi/git/MPI_CNN/models/auto_encoder_multilayer_cnn.py", line 257, in init_export_network
    model = Model(model_input, y)
  File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 88, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1705, in __init__
    build_map_of_graph(x, finished_nodes, nodes_in_progress)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1695, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1665, in build_map_of_graph
    layer, node_index, tensor_index = tensor._keras_history
AttributeError: 'Tensor' object has no attribute '_keras_history'

As requested, here is the full init_export_network function:

import os
import shutil

from keras import backend as K
from keras import optimizers
from keras.layers import (Conv2DTranspose, Convolution1D, Dense, Dropout,
                          Embedding, Input, Lambda)
from keras.models import Model
from keras.regularizers import l2

# unpckl is my own pickle-loading helper


def init_export_network(in_args):
    """Return base cnn architecture and placeholder/untrained weights."""
    import_dir = os.path.join('cv_data',
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)
    # set output dir as models/[model_name]/[data_name]/[label_file_name]/[this_fold]
    output_dir = os.path.join("initialized_models",
                              in_args.model_name,
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)
    print("exporting to", output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    else:
        print(output_dir, "data dir identified but will be re-populated")
        shutil.rmtree(output_dir)
        os.makedirs(output_dir)
    # unpckl wv_matrix, class_names
    wv_matrix = unpckl(os.path.join(import_dir, 'wv_matrix.pickle'))
    print("valid pre-processed data found in", import_dir)

    # define network layers ----------------------------------------------------
    input_shape = (in_args.seq_len,)
    output_shape = (in_args.seq_len, len(wv_matrix[0]),)
    emb_size = len(wv_matrix[0])
    model_input = Input(shape=input_shape)
    emb_lookup = Embedding(len(wv_matrix),
                           len(wv_matrix[0]),
                           embeddings_regularizer=l2(in_args.emb_l2_rate),
                           input_length=in_args.seq_len, name="embedding")(model_input)
    #emb_lookup = Embedding(len(wv_matrix), len(wv_matrix[0]), input_length=in_args.seq_len, name="embedding", )(model_input)
    if in_args.emb_dropout:
        emb_lookup = Dropout(in_args.emb_dropout)(emb_lookup)
    conv_blocks = []

    # conv blocks --------------------------------------------------------------
    print("emb_lookup shape!!!!", emb_lookup.shape)
    for ith_conv, sz in enumerate(in_args.filter_sizes):
        if ith_conv == 0:
            conv_input = emb_lookup
        else:
            conv_input = conv
        conv = Convolution1D(filters=in_args.feat_maps[ith_conv],
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             kernel_initializer='lecun_uniform',
                             kernel_regularizer=l2(in_args.l2_rate),
                             strides=1,
                             name="{}_conv".format(ith_conv))(conv_input)
        print("{}_conv".format(ith_conv), conv.shape)

    # deconv blocks with dimensions reverse of multilayer_cnn ------------------
    deconv_blocks = []
    deconv_filter_sizes = in_args.filter_sizes
    deconv_filter_sizes.reverse()
    #print("conv_shape!!!", conv.shape)
    conv_input = conv
    print("conv_upsampling_shape!!!", conv_input.shape)
    #unpool_shape = ((conv[1],-1,conv[2]))
    #conv_input = Reshape((1,conv_input[1],conv_input[2]))(conv_input)
    #print("conv_input_shape!!!", conv_input.shape)
    #conv_input = Reshape(unpool_shape),conv_input
    #conv_input = Reshape(unpool_shape)(conv_input)
    deconv_input = K.expand_dims(conv_input, 2)
    print("conv_reshape_shape!!!", conv_input)
    for ith_conv, sz in enumerate(deconv_filter_sizes):
        print("{}_deconv input shape!!!".format(ith_conv), deconv_input)
        deconv = Conv2DTranspose(filters=in_args.feat_maps[ith_conv],
                                 kernel_size=(sz, 1),
                                 #kernel_size=sz,
                                 padding="valid",
                                 activation="relu",
                                 kernel_initializer='lecun_uniform',
                                 kernel_regularizer=l2(in_args.l2_rate),
                                 strides=(1, 1),
                                 name="{}_deconv".format(ith_conv))(deconv_input)
        deconv_input = deconv
        print("{}_deconv input shape!!!".format(ith_conv), deconv_input)
    print("deconv_output shape", deconv)
    #z = Flatten()(conv)
    #deconv_out = Flatten(deconv)
    #outshape = (in_args.seq_len,len(wv_matrix[0]))
    outshape = len(wv_matrix[0])
    recon_layer = Dense(outshape, activation="tanh", kernel_regularizer=l2(in_args.l2_rate))(deconv_input)
    print("recon_layer shape", recon_layer)
    #s_recon_layer = K.squeeze(recon_layer,2)
    s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)
    print("squeezed recon_layer shape", s_recon_layer)
    #print("conv_reshape_shape!!!", conv_input.shape)(conv)
    # end define network layers ------------------------------------------------
    #model_output = Dense(outshape, activation="elu",kernel_regularizer=l2(in_args.l2_rate))(z)
    y = custom_ae_layer()([model_input, emb_lookup, s_recon_layer])
    model = Model(model_input, y)

    # finished network layers definition - compile network
    opt = optimizers.Adamax()
    model.compile(loss=None, optimizer='adamax')
    # load wv_matrix into embedding layer
    embedding_layer = model.get_layer("embedding")
    embedding_layer.set_weights([wv_matrix])
    print("Initializing embedding layer with word2vec weights, shape", wv_matrix.shape)
    # save model architecture as json
    open(os.path.join(output_dir, "structure.json"), "w").write(model.to_json())
    # save initialized model weights as .hdf5
    model.save_weights(os.path.join(output_dir, "weights" + ".hdf5"))
    print("multilayer network/initial weights successfully saved in", output_dir)
    print(in_args)
    #print(model.summary())
    return model, in_args

Solution

The error message looks very similar to the one in this question: https://stackoverflow.com/a/45309816/1531463

In short, I think you need to wrap this line:

s_recon_layer = K.squeeze(recon_layer, 2)

(or any other direct backend function call) in a Lambda layer. Keras traces layer connectivity through a _keras_history attribute that only Keras layers attach to their output tensors; a raw backend op returns a plain tensor without it, so Model() cannot walk the graph back through that tensor.

Specifically:

s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)
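Note that the posted init_export_network contains another raw backend call with the same problem: deconv_input = K.expand_dims(conv_input, 2), whose output feeds the Conv2DTranspose stack, so it likely needs the same treatment. A minimal self-contained sketch of the pattern (toy shapes chosen only to illustrate, not your actual network):

from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

x = Input(shape=(10, 1, 8))

# A raw backend op returns a plain tensor without _keras_history, so using
# its result in Model() raises the AttributeError:
#   squeezed = K.squeeze(x, 2)

# The same ops wrapped in Lambda layers stay proper Keras tensors:
squeezed = Lambda(lambda t: K.squeeze(t, 2))(x)             # (None, 10, 8)
expanded = Lambda(lambda t: K.expand_dims(t, 2))(squeezed)  # (None, 10, 1, 8)

model = Model(x, expanded)  # builds without error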
