【NLP】English Data Preprocessing: A Simple Word-Frequency Count Example

Note: the word-frequency count here takes as its input "features" the "data_lemmatized" produced by the Gensim pipeline further down (after a simple post-processing step).

import collections
features = ['kansai', 'electric', 'back', 'electric']  # expected input format: a flat list of tokens
def train(features):
    model = collections.defaultdict(int)  # plain counts; the original lambda: 1 default would inflate every count by one
    for f in features:
        model[f] += 1
    ff = open('output_path.txt', 'w', encoding='utf-8')  # placeholder output path
    ff.write(str(dict(model)))  # cast to dict so the file holds {'kansai': 1, ...} rather than the defaultdict repr
    ff.close()
    #print(model)
    return model


train(features)
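
For a plain frequency count, the standard library's collections.Counter does the same job without a hand-rolled loop. A minimal equivalent sketch (the output path is a placeholder, as above):

from collections import Counter

def train_counter(features):
    model = Counter(features)  # e.g. Counter({'electric': 2, 'kansai': 1, 'back': 1})
    with open('output_path.txt', 'w', encoding='utf-8') as ff:
        ff.write(str(dict(model)))
    return model

print(train_counter(['kansai', 'electric', 'back', 'electric']).most_common(2))
# -> [('electric', 2), ('kansai', 1)]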

Obtaining "data_lemmatized" from the Gensim pipeline (to be lightly post-processed afterwards)

# Imports and data loading
import re
import numpy as np
import pandas as pd
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
import collections

nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])  # the 'en' shorthand is removed in spaCy 3.x; install the model once with: python -m spacy download en_core_web_sm
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim  # don't skip this (renamed to pyLDAvis.gensim_models in newer pyLDAvis releases)
import matplotlib.pyplot as plt
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# Load the NLTK stopword list
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'be', 'have', 'com', 'www', 'can', 'mekong', 'dam'])  # extend the stopword list with your own domain terms

#data=("I LOVE apples#   &   3241","he likes PIG3s","she do not like anything,except apples.\.")
f=open('xxx.txt','r',encoding='utf-8')
data=f.readlines()
# Note: f.read() returns a single string, while f.readlines() returns a list of lines

# Lowercase conversion
# and removal of special symbols (both handled by simple_preprocess)
def sent_to_words(sentences):
    for sentence in sentences:
        yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))  # deacc=True removes punctuations
data_words = list(sent_to_words(data))
#print(data_words)
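
As a quick sanity check on what simple_preprocess does (it lowercases, tokenizes on alphabetic runs, and drops tokens outside the default 2–15 character range), here is its behavior on the first commented-out sample sentence above:

print(gensim.utils.simple_preprocess("I LOVE apples#   &   3241", deacc=True))
# -> ['love', 'apples']  ('I' is below the default min_len=2; '&' and '3241' are not alphabetic tokens)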

# Remove stopwords
def remove_stopwords(texts):
    return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
data_words_nostops = remove_stopwords(data_words)
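
A quick check with a hand-made token list (illustrative input, not from the real corpus):

print(remove_stopwords([['he', 'likes', 'pigs'], ['i', 'love', 'apples']]))
# -> [['likes', 'pigs'], ['love', 'apples']]  ('he' and 'i' are in the NLTK stopword list)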


# Keep only tokens POS-tagged as noun, verb, adjective, or adverb, then lemmatize them
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    """https://spacy.io/api/annotation"""
    texts_out = []
    for sent in texts:
        doc = nlp(" ".join(sent))
        texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
    return texts_out
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_nostops, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
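
Note that spaCy's tagger must stay enabled (only 'parser' and 'ner' are disabled above), since the POS filter relies on token.pos_. A made-up illustration of the effect (exact tags and lemmas can vary with the model version):

print(lemmatization([['dogs', 'barking', 'loudly']]))
# typically -> [['dog', 'bark', 'loudly']]: the plural noun and the gerund are reduced to lemmas, the adverb is kept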

ff = open('xxx.txt', 'w', encoding='utf-8')
ff.write(str(data_lemmatized))  # write the nested list as one string; writelines would iterate it character by character
ff.close()
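
To close the loop with the word-frequency example at the top: data_lemmatized is a list of token lists, so the "simple post-processing" mentioned in the opening note presumably amounts to flattening it into one flat list before passing it to train(). A minimal sketch, assuming the pipeline above has run:

features = [token for doc in data_lemmatized for token in doc]  # flatten the per-document token lists
model = train(features)  # word-frequency dict, also written to the output file by train()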
