import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
dtype = torch.FloatTensor  # legacy CPU float tensor type (pre-0.4 PyTorch style)

# Toy corpus: three short sentences. The NNLM will learn to predict each
# sentence's final word from the words that precede it.
sentences = ["i like dog", "i love coffee", "i hate milk"]

# Build the vocabulary: every unique word across the corpus.
# NOTE: set() ordering is arbitrary, so word indices vary between runs.
word_list = list(set(" ".join(sentences).split()))
word_dict = {w: i for i, w in enumerate(word_list)}    # word  -> index
number_dict = {i: w for i, w in enumerate(word_list)}  # index -> word
n_class = len(word_dict)  # vocabulary size (number of output classes)
# NNLM hyper-parameters (notation follows Bengio et al., 2003)
n_step = 2    # n-1 in the paper: number of context words per example
n_hidden = 2  # h in the paper: hidden-layer width
m = 2         # m in the paper: word-embedding dimension


def make_batch(sentences):
    """Turn each sentence into a (context indices, target index) pair.

    For every sentence, the context is the word indices of all words except
    the last, and the target is the index of the last word (the word the
    model must predict).

    Args:
        sentences: iterable of whitespace-separated sentence strings whose
            words all appear in the module-level ``word_dict``.

    Returns:
        (input_batch, target_batch): a list of index lists and a parallel
        list of target indices.
    """
    input_batch = []
    target_batch = []
    for sen in sentences:
        words = sen.split()
        # Indices of the first n-1 words (the context window).
        context = [word_dict[w] for w in words[:-1]]
        # Index of the final word (the prediction target).
        target = word_dict[words[-1]]
        input_batch.append(context)
        target_batch.append(target)
    return input_batch, target_batch
# --- Keras embedding-classifier demo ---
# NOTE(review): relies on names not defined in this chunk (pad_sequences,
# Input, Embedding, Flatten, Dense, Model, encoded_docs, labels, vocab_size)
# — presumably Keras imports and data prep earlier in the full script; verify.

# Pad every document to a fixed length of 4 word indices (zeros appended).
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)

# Define the model: embed each of the 4 word indices into an 8-dim vector,
# flatten, then a single sigmoid unit for binary classification.
inp = Input(shape=(4,))
x = Embedding(vocab_size, 8, input_length=max_length)(inp)
x = Flatten()(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=x)

# Compile the model.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# Summarize the model.
print(model.summary())

# Fit the model.
model.fit(padded_docs, labels, epochs=50, verbose=0)

# Evaluate the model on the training documents.
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy * 100))
# code by Tae Hwan Jung @graykode