Predict.py

This is a tutorial on generating predictions with Python and Keras. It first forces the device to CPU and imports the necessary libraries, then loads the pre-trained model parameters. The model generates news headlines from descriptions; the input data is processed with padding, clipping, and other preprocessing steps. Finally, the model produces some sample predicted headlines.
# coding: utf-8

# In[1]:


FN = 'predict'


# if your GPU is busy you can use CPU for predictions

# In[2]:


import os
os.environ['THEANO_FLAGS'] = 'device=cpu,floatX=float32'


# In[3]:


import theano
DEBUG = False
if DEBUG:
    import warnings
    warnings.filterwarnings('ignore','.*Warning, Cannot compute test value')
    warnings.filterwarnings('ignore','.*has no test value')
#     theano.config.optimizer='fast_compile'
#     theano.config.exception_verbosity='high'
    theano.config.optimizer='None'
    theano.config.compute_test_value = 'warn'


# In[4]:


import keras
keras.__version__


# Generate headlines using the "simple" model from http://arxiv.org/pdf/1512.01712v1.pdf

# Use the token indexing from [vocabulary-embedding](./vocabulary-embedding.ipynb); note that this does not clip word indexes to `vocab_size`.
# 
# Use the index of outside words to replace them with several `oov` placeholder words (`oov0`, `oov1`, ...) that appear in the same description and headline. This allows the headline generator to replace an oov word with the same word from the description.
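# As a rough illustration of this oov handling (a hypothetical helper, not the
# notebook's actual code; it reuses `vocab_size` and `nb_unknown_words` defined below):

def assign_oov_placeholders(desc_idxs, head_idxs):
    """Sketch: map each oov index to a shared placeholder slot, per example."""
    oov2ph = {}  # per-example mapping from oov index to placeholder index
    def convert(idxs):
        out = []
        for idx in idxs:
            if idx < vocab_size - nb_unknown_words:
                out.append(idx)                      # in-vocabulary word: keep as-is
            elif idx in oov2ph:
                out.append(oov2ph[idx])              # oov word already seen in this example
            elif len(oov2ph) < nb_unknown_words:
                oov2ph[idx] = vocab_size - 1 - len(oov2ph)  # next free placeholder slot
                out.append(oov2ph[idx])
            else:
                out.append(vocab_size - 1)           # out of slots: fall back to <0>
        return out
    return convert(desc_idxs), convert(head_idxs)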

# In[5]:


FN0 = 'vocabulary-embedding'


# we will generate predictions using the model generated in the train notebook

# In[6]:


FN1 = 'train'


# Input data (`X`) is made of `maxlend` description words followed by `eos`,
# followed by headline words, followed by `eos`.
# If the description is shorter than `maxlend` it is left-padded with `empty`;
# if the entire sequence is longer than `maxlen` it is clipped, and if it is shorter it is padded.
# 
# Labels (`Y`) are the headline words followed by `eos`, clipped or padded to `maxlenh`.
# 
# In other words, the input is made of a `maxlend` half in which the description is padded from the left,
# and a `maxlenh` half in which `eos` is followed by the headline, followed by another `eos` if there is enough space.
# 
# The labels match only the second half;
# the first label matches the `eos` at the start of the second half (following the description in the first half).
# A sketch of this layout follows the parameter cell below.

# the model parameters should be identical to those used in training, but note that `maxlend` is flexible

# In[7]:


maxlend=50 # 0 - if we don't want to use the description at all
maxlenh=25
maxlen = maxlend + maxlenh
rnn_size = 512
rnn_layers = 3  # match FN1
batch_norm=False
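

# As a concrete sketch of the input layout described above (an illustration, not
# the notebook's actual helper; `empty` and `eos` are the special indexes defined
# further down):

def build_input(desc, head):
    """Sketch: build one input row of length maxlen from two index lists."""
    desc = desc[-maxlend:]                            # keep at most maxlend description words
    left = [empty] * (maxlend - len(desc)) + desc     # left-pad the description half
    right = [eos] + head + [eos]                      # headline half: eos, headline, closing eos
    right = right[:maxlenh]                           # clip if too long (closing eos may drop)
    right += [empty] * (maxlenh - len(right))         # pad if too short
    return left + right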


# the output of the first `activation_rnn_size` nodes from the top layer is used for the activation (attention-like weighting) and the rest is used to select the predicted word

# In[8]:


activation_rnn_size = 40 if maxlend else 0
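

# As a rough sketch (hypothetical, not the training code itself): if `h` is the
# top layer's output with shape (batch, timesteps, rnn_size), it is split like this:

def split_top_output(h):
    """Sketch: the first activation_rnn_size units drive the activation/attention,
    the remaining units drive word prediction."""
    activations = h[:, :, :activation_rnn_size]
    word_features = h[:, :, activation_rnn_size:]
    return activations, word_features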


# In[9]:


# training parameters
seed=42
p_W, p_U, p_dense, weight_decay = 0, 0, 0, 0
optimizer = 'adam'
batch_size=64


# In[10]:


nb_train_samples = 30000
nb_val_samples = 3000


# # read word embedding

# In[11]:


import cPickle as pickle  # Python 2 module; on Python 3 use the built-in `pickle`

with open('data/%s.pkl'%FN0, 'rb') as fp:
    embedding, idx2word, word2idx, glove_idx2idx = pickle.load(fp)
vocab_size, embedding_size = embedding.shape


# In[12]:


nb_unknown_words = 10


# In[13]:


print ('dimension of embedding space for words', embedding_size)
print ('vocabulary size', vocab_size, 'the last %d words can be used as placeholders for unknown/oov words'%nb_unknown_words)
print ('total number of different words', len(idx2word), len(word2idx))
print ('number of words outside vocabulary which we can substitute using glove similarity', len(glove_idx2idx))
print ('number of words that will be regarded as unknown (unk)/out-of-vocabulary (oov)', len(idx2word)-vocab_size-len(glove_idx2idx))


# In[14]:


# label the last `nb_unknown_words` vocabulary slots as oov placeholders <0>, <1>, ...
for i in range(nb_unknown_words):
    idx2word[vocab_size-1-i] = '<%d>'%i


# In[15]:


# mark every word outside the real vocabulary with a trailing '^'
for i in range(vocab_size-nb_unknown_words, len(idx2word)):
    idx2word[i] = idx2word[i]+'^'


# In[16]:


empty = 0  # index used for padding
eos = 1    # index used for end-of-sequence
idx2word[empty] = '_'
idx2word[eos] = '~'


# In[17]:


import numpy as np
from keras.preprocessing import sequence
from keras.utils import np_utils
import random, sys


# In[18]:


def prt(label, x):
    # decode a list of word indexes and print them on one line
    print(label + ': ' + ' '.join(idx2word[w] for w in x))
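

# Example usage with the special tokens defined above (prints "H: _ _ ~"):
prt('H', [empty, empty, eos])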


# # Model

# In[19]:


from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout, RepeatVector
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from keras.regularizers import l2
from keras.layers.core import Lambda
import keras.backend as K


# In[20]:


# seed weight initialization
random.seed(seed)
np.random.seed(seed)


# In[21]:


regularizer = l2(weight_decay) if weight_decay else None


# ## rnn model

# start with a stacked LSTM, which is identical to the bottom of the model used in training
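
# A minimal sketch of such a stack, written against the Keras 1.x API implied by
# the imports above (an assumption of this sketch; the real weights are loaded
# from the `FN1` train notebook):

model = Sequential()
model.add(Embedding(vocab_size, embedding_size,
                    input_length=maxlen,
                    W_regularizer=regularizer,
                    weights=[embedding], mask_zero=True,
                    name='embedding_1'))
for i in range(rnn_layers):
    # each LSTM layer returns the full sequence so the next layer (and the
    # attention-style split above) can see every timestep
    model.add(LSTM(rnn_size, return_sequences=True,
                   W_regularizer=regularizer, U_regularizer=regularizer,
                   b_regularizer=regularizer,
                   dropout_W=p_W, dropout_U=p_U,
                   name='lstm_%d' % (i + 1)))
    model.add(Dropout(p_dense, name='dropout_%d' % (i + 1)))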