使用tensorflow进行文本的预处理
- tokenizer
- 生成Embedding matrix
使用到的数据集
Quora问答数据集
glove.6B.300d文本Embedding
# Load the Quora question-pairs training data with pandas for a quick
# sanity check of its size and first rows.
import pandas as pd

data_df = pd.read_csv("train.csv")
n_rows, n_cols = data_df.shape
print("数据集中包含 {}行数据,每一行有 {} 个变量".format(n_rows, n_cols))
data_df.head(10)
# Re-read the same CSV with the stdlib csv module, collecting the two
# question columns and the duplicate label into three parallel lists.
import csv

question1, question2, is_duplicate = [], [], []
with open("train.csv", encoding='utf-8') as csvfile:
    for record in csv.DictReader(csvfile, delimiter=","):
        question1.append(record["question1"])
        question2.append(record["question2"])
        is_duplicate.append(record["is_duplicate"])
print("question pairs: %d" % len(question1))
import re


def _clean_question(q):
    """Lowercase a question string and apply the normalisation regexes.

    The substitution order is kept exactly as in the original script:
    first strip characters outside the allowed set, then expand common
    English contractions ("what's", "n't", "i'm", ...).
    """
    # str.split() only ever yields strings, so every token can be
    # lowercased directly (the original `type(word) is str` check had an
    # else branch that did the same append — dead code, now removed).
    q = " ".join(word.lower() for word in q.split())
    q = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", q)
    q = re.sub(r"what's", "what is ", q)
    q = re.sub(r"\'s", " ", q)
    q = re.sub(r"\'ve", " have ", q)
    q = re.sub(r"can't", "cannot ", q)
    q = re.sub(r"n't", " not ", q)
    q = re.sub(r"i'm", "i am ", q)
    q = re.sub(r"\'re", " are ", q)
    q = re.sub(r"\'d", " would ", q)
    q = re.sub(r"\'ll", " will ", q)
    return q


# Clean question1 and question2 together; the first len(question1)
# entries of questions_cleaned correspond to question1.
questions = question1 + question2
questions_cleaned = [_clean_question(q) for q in questions]
print("Questions: %d" % len(questions_cleaned))
Tokenize
from tensorflow.keras.preprocessing.text import Tokenizer

# Cap the vocabulary at the most frequent MAX_NB_WORDS words.
MAX_NB_WORDS = 200000

# BUG FIX: the original fitted the tokenizer on the raw `questions` list
# and converted the raw `question1`/`question2`, which silently discarded
# the cleaning step above. Fit and transform on the cleaned texts instead.
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(questions_cleaned)

# questions_cleaned is the cleaned question1 followed by the cleaned
# question2, so split it back at the boundary.
n_q1 = len(question1)
question1_word_sequences = tokenizer.texts_to_sequences(questions_cleaned[:n_q1])
question2_word_sequences = tokenizer.texts_to_sequences(questions_cleaned[n_q1:])
# word_index maps every seen word to a 1-based frequency rank (it is NOT
# truncated to num_words; truncation happens in texts_to_sequences).
word_index = tokenizer.word_index

print("Words in index: %d" % len(word_index))
print("Length of question1: %d" % len(question1_word_sequences))
print("First 10 sequence of question1: ")
print(question1_word_sequences[0:10])
Embedding matrix
# Load pre-trained GloVe vectors into a word -> float32 vector dict.
# NOTE(review): the intro mentions glove.840B.300d, but this file is the
# 6B variant — confirm which embedding set is intended.
import numpy as np

embeddings_index = {}
with open("glove.6B.300d.txt", encoding='utf-8') as f:
    for line in f:
        word, *coefs = line.split(' ')
        embeddings_index[word] = np.asarray(coefs, dtype='float32')
print('Word embeddings: %d' % len(embeddings_index))
# Build the embedding matrix: row i holds the GloVe vector for the word
# with tokenizer index i. Row 0 (padding) and rows for words without a
# GloVe entry stay all-zero.
EMBEDDING_DIM = 300
nb_words = min(MAX_NB_WORDS, len(word_index))

word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, idx in word_index.items():
    if idx > MAX_NB_WORDS:
        continue  # beyond the vocabulary cap — no row reserved for it
    vector = embeddings_index.get(word)
    if vector is not None:
        word_embedding_matrix[idx] = vector

print("Word embedding matrix size: {}".format(word_embedding_matrix.shape))
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Pad/truncate every sequence to a fixed length so pairs can be batched.
MAX_SEQUENCE_LENGTH = 25

q1_data, q2_data = (
    pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    for seqs in (question1_word_sequences, question2_word_sequences)
)
labels = np.array(is_duplicate, dtype=int)

print('Shape of question1 data:', q1_data.shape)
print('Shape of question2 data:', q2_data.shape)
print('Shape of label:', labels.shape)
import json

# Persist the preprocessed arrays for the training script.
# BUG FIX: the original passed open(path, 'wb') handles to np.save and
# never closed them; passing the filename lets np.save open and close the
# file itself (no ".npy" suffix is appended since the names already end
# with it).
np.save("q1_train.npy", q1_data)
np.save("q2_train.npy", q2_data)
np.save("label_train.npy", labels)
np.save("word_embedding_matrix.npy", word_embedding_matrix)

# Record the vocabulary size the model will need for the Embedding layer.
with open("nb_words.json", 'w') as f:
    json.dump({'nb_words': nb_words}, f)