Text Preprocessing with TensorFlow

  1. Tokenize the text
  2. Generate the embedding matrix

Datasets used

The Quora question-pairs dataset

glove.840B.300d pretrained text embeddings

import pandas as pd

data_df = pd.read_csv("train.csv")
print("数据集中包含 {}行数据,每一行有 {} 个变量".format(data_df.shape[0], data_df.shape[1]))
data_df.head(10)
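
Before any processing, it can help to see how balanced the label is; a quick sketch using the same data_df (the is_duplicate column name comes from the Quora file):

print(data_df['is_duplicate'].value_counts())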
import csv

question1 = []
question2 = []
is_duplicate = []
with open("train.csv", encoding='utf-8') as csvfile:
    reader = csv.DictReader(csvfile, delimiter=",")
    for row in reader:
        question1.append(row["question1"])
        question2.append(row["question2"])
        is_duplicate.append(row["is_duplicate"])
print("question pairs: %d" % len(question1))
import re


questions = question1 + question2
questions_cleaned = []

for q in questions:
    # csv.DictReader always yields strings, so the original per-word
    # type check was dead code; lowercase the whole question directly
    q = q.lower()
    
    q = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", q)
    q = re.sub(r"what's", "what is ", q)
    q = re.sub(r"\'s", " ", q)
    q = re.sub(r"\'ve", " have ", q)
    q = re.sub(r"can't", "cannot ", q)
    q = re.sub(r"n't", " not ", q)
    q = re.sub(r"i'm", "i am ", q)
    q = re.sub(r"\'re", " are ", q)
    q = re.sub(r"\'d", " would ", q)
    q = re.sub(r"\'ll", " will ", q)
    questions_cleaned.append(q)
    
print("Questions: %d" % len(questions_cleaned))

Tokenize

from tensorflow.keras.preprocessing.text import Tokenizer

MAX_NB_WORDS = 200000

# Split the cleaned questions back into the two original columns
question1_cleaned = questions_cleaned[:len(question1)]
question2_cleaned = questions_cleaned[len(question1):]

tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(questions_cleaned)  # fit on the cleaned text, not the raw questions
question1_word_sequences = tokenizer.texts_to_sequences(question1_cleaned)
question2_word_sequences = tokenizer.texts_to_sequences(question2_cleaned)
word_index = tokenizer.word_index

print("Words in index: %d" % len(word_index))
print("Length of question1: %d" % len(question1_word_sequences))
print("First 10 sequences of question1:")
print(question1_word_sequences[0:10])
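
To confirm that the sequences are faithful, word_index can be inverted to decode a sequence back into (cleaned) words; a minimal sketch, with index_to_word being my own helper name:

index_to_word = {i: w for w, i in word_index.items()}
decoded = " ".join(index_to_word[i] for i in question1_word_sequences[0])
print(decoded)  # should match question1_cleaned[0], minus any filtered-out characters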

Embedding matrix

import numpy as np

embeddings_index = {}
with open("glove.6B.300d.txt", encoding='utf-8') as f:
    for line in f:
        values = line.split(' ')
        word = values[0]
        embedding = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = embedding
print('Word embeddings: %d' % len(embeddings_index))

nb_words = min(MAX_NB_WORDS, len(word_index))
EMBEDDING_DIM = 300
word_embedding_matrix = np.zeros((nb_words+1, EMBEDDING_DIM))  # row 0 stays all-zero for padding
for word, i in word_index.items():
    if i > nb_words:  # skip words outside the top-MAX_NB_WORDS vocabulary
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        word_embedding_matrix[i] = embedding_vector
print("Word embedding matrix size: {}".format(word_embedding_matrix.shape))
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
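
This matrix is typically fed into a frozen Keras Embedding layer; a minimal sketch, assuming TensorFlow 2.x:

from tensorflow.keras.layers import Embedding
from tensorflow.keras.initializers import Constant

embedding_layer = Embedding(
    input_dim=nb_words + 1,                                   # matches the matrix's row count
    output_dim=EMBEDDING_DIM,
    embeddings_initializer=Constant(word_embedding_matrix),   # initialize from the GloVe matrix
    trainable=False,                                          # keep the pretrained vectors fixed
)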
from tensorflow.keras.preprocessing.sequence import pad_sequences


MAX_SEQUENCE_LENGTH = 25

q1_data = pad_sequences(question1_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
q2_data = pad_sequences(question2_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(is_duplicate, dtype=int)

print('Shape of question1 data:', q1_data.shape)
print('Shape of question2 data:', q2_data.shape)
print('Shape of label:', labels.shape)
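
Note that pad_sequences pads and truncates at the front by default, which is why index 0 was reserved in the embedding matrix; a toy example:

print(pad_sequences([[1, 2, 3]], maxlen=5))   # [[0 0 1 2 3]] -- zeros prepended
print(pad_sequences([[1, 2, 3]], maxlen=2))   # [[2 3]] -- truncated from the front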
import json
np.save(open("q1_train.npy",'wb'), q1_data)
np.save(open("q2_train.npy",'wb'), q2_data)
np.save(open("label_train.npy", 'wb'), labels)
np.save(open("word_embedding_matrix.npy", 'wb'),word_embedding_matrix)
with open("nb_words.json", 'w') as f:
    json.dump({'nb_words': nb_words}, f)
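
A training script can later restore everything with the mirror-image calls (file names exactly as saved above):

import json
import numpy as np

q1_data = np.load("q1_train.npy")
q2_data = np.load("q2_train.npy")
labels = np.load("label_train.npy")
word_embedding_matrix = np.load("word_embedding_matrix.npy")
with open("nb_words.json") as f:
    nb_words = json.load(f)['nb_words']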