from nltk import word_tokenize,WordNetLemmatizer
import pandas as pd
from nltk.corpus import stopwords
import re
from gensim import corpora
import gensim
from gensim.models import word2vec,fasttext
from sklearn.feature_extraction.text import TfidfVectorizer
# English stopword list (all lowercase) used by clean() to filter tokens.
stoplist = stopwords.words('english')
# Kaggle movie-review sentiment training set (tab-separated); the code below
# expects a 'Phrase' column. NOTE(review): hard-coded local Windows path.
data_train=pd.read_csv(r'D:\Kaggle\train.tsv',sep='\t')
def clean(data):
    """Tokenize phrases, keeping letters only and dropping English stopwords.

    Parameters
    ----------
    data : iterable of str
        Raw phrases (e.g. ``data_train.Phrase.values``).

    Returns
    -------
    list[list[str]]
        One token list per phrase. Phrases whose tokens are all
        stopwords (or that contain no letters) are dropped entirely.
    """
    all_word = []
    for phrase in data:
        # Replace every non-letter character with a space before tokenizing.
        letters_only = re.sub('[^a-zA-Z]', ' ', phrase)
        tokens = word_tokenize(letters_only)
        # NOTE(review): stoplist entries are lowercase, so capitalized
        # stopwords such as "The" pass through — confirm this is intended.
        filtered = [tok for tok in tokens if tok not in stoplist]
        if filtered:
            all_word.append(filtered)
    return all_word
# Tokenized, stopword-filtered corpus: a list of token lists, one per phrase.
all_word = clean(data_train.Phrase.values)
# model = word2vec.Word2Vec(all_word,min_count=1,iter=20)
# model.save("word2vec.model") # save the model
# print(model.wv['right']) # look up a word's vector
# NOTE(review): `size`, `word_ngrams` are gensim 3.x parameter names; gensim 4.x
# renamed `size` -> `vector_size` and removed `iter`/`word_ngrams` — confirm
# the installed gensim version before running.
model1 = fasttext.FastText(all_word,size=100,window=5,min_count=5,workers=4,word_ngrams=1) # corpus, embedding size, context window size
model1.save("fast_text.model")