Feature Engineering in Machine Learning
Definition
Feature engineering is the process of transforming raw data into features that better represent the underlying problem to the predictive model, thereby improving prediction accuracy on unseen data. It consists of three parts: feature construction, feature extraction, and feature selection. Data and features determine the upper bound of what machine learning can achieve; models and algorithms only approach that bound, so a successful result usually starts with how the data is prepared.
TF-IDF
TF: term frequency, i.e. how many times a term appears in a document
IDF: inverse document frequency, log(total number of documents / number of documents containing the term)
Purpose: to evaluate how important a term is to one document within a document collection or corpus; a small worked sketch follows.
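For intuition, here is a minimal hand-computed sketch of the plain formula above (the toy documents are made up for illustration; note that sklearn's TfidfVectorizer uses a smoothed variant, log((1 + n) / (1 + df)) + 1, plus L2 normalization, so its numbers will differ):
import math
def tf_idf(term, doc, docs):
    tf = doc.count(term)                   # raw count of the term in this document
    df = sum(term in d for d in docs)      # number of documents containing the term
    return tf * math.log(len(docs) / df)   # TF * IDF
docs = [['life', 'is', 'happy'], ['life', 'is', 'a', 'movie'], ['happy', 'movie']]
print(tf_idf('life', docs[0], docs))       # 1 * log(3/2) ≈ 0.405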
Preprocessing code
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.impute import SimpleImputer
import numpy as np
import jieba
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
def dictvec():
    # Dictionary feature extraction: categorical keys are one-hot encoded, numeric keys pass through
    dict = DictVectorizer(sparse=False)  # sparse=False returns a dense array instead of a sparse matrix
    # dict = DictVectorizer()
    data = dict.fit_transform([{'city': "北京", 'temperature': 100}, {'city': "上海", 'temperature': 200}, {'city': "桂林", 'temperature': 20}])
    lb = dict.get_feature_names()  # in sklearn >= 1.2 use get_feature_names_out() instead
    hy = dict.inverse_transform(data)
    print(data)
    print(lb)
    print(hy)
    print(dict.transform([{'foo': 1, "idk": 222}]))  # features unseen during fit are ignored
    return
def countvec():
    # Word-count (bag-of-words) features; single characters are not counted,
    # and Chinese text must be word-segmented before feature extraction
    count = CountVectorizer()
    data = count.fit_transform(['life is a fking movie', '人生如戏呐弟弟', 'life is happy'])
    print(count.get_feature_names())
    print(data.toarray())
    return
def hanzivec():
    # Bag-of-words counts on Chinese text that has been segmented by jieba
    c1, c2, c3 = cutword()
    print(c1, c2, c3)
    cv = CountVectorizer()
    data = cv.fit_transform([c1, c2, c3])
    print(cv.get_feature_names())
    print(data.toarray())
    return
def cutword():
    # Segment each sentence with jieba, then rejoin the tokens with spaces
    # so that CountVectorizer / TfidfVectorizer can split on whitespace
    con1 = jieba.cut("写作活动大致可分为“采集—构思—表述”三个阶段")
    con2 = jieba.cut("学生在教师指导下按照特定要求用书面语言创造文本")
    con3 = jieba.cut("写作是人类精神生活与实践活动的重要组成部分")
    content1 = list(con1)
    content2 = list(con2)
    content3 = list(con3)
    c1 = ' '.join(content1)
    c2 = ' '.join(content2)
    c3 = ' '.join(content3)
    return c1, c2, c3
def tfidfvec():
    # TF-IDF weights for the segmented Chinese corpus
    c1, c2, c3 = cutword()
    print(c1, c2, c3)
    tf = TfidfVectorizer()
    data = tf.fit_transform([c1, c2, c3])
    print(tf.get_feature_names())
    print(data.toarray())
    return
def mm():
    # Min-max normalization; feature_range sets the target interval
    mm = MinMaxScaler(feature_range=(2, 3))  # scale each feature into [2, 3]
    data = mm.fit_transform([[90, 20, 10, 40], [40, 50, 90, 60], [10, 30, 40, 70]])
    print(data)
    return
def stand():
    # Standardization: transform each column to zero mean and unit variance
    std = StandardScaler()
    data = std.fit_transform([[1, -1, 3], [2, 5, -4], [3, 4, 2]])
    print(data)
    return
def spim():
    # Fill missing values with the column mean; missing_values can also be given
    # in string form (e.g. 'NaN') when the data uses such markers
    spim = SimpleImputer(missing_values=np.nan, strategy='mean')
    data = spim.fit_transform([[1, 2], [np.nan, 3], [7, np.nan]])
    print(data)
    return
def var():
    # Feature selection: drop low-variance features (variance below the threshold)
    var = VarianceThreshold(threshold=1)
    data = var.fit_transform([[0, 2, 3, 1], [4, 2, 1, 2], [1, 0, 0, 1]])
    print(data)
    return
def pca():
    # PCA: n_components=0.9 keeps enough components to retain 90% of the variance
    pca = PCA(n_components=0.9)
    data = pca.fit_transform([[0, 2, 3, 1], [4, 2, 1, 2], [1, 0, 0, 1]])
    print(data)
    return
# dictvec()
# countvec()
# hanzivec()
# tfidfvec()
# mm()
# stand()
# spim()
# var()
# pca()
Notes:
1. Normalization (min-max scaling) is more easily affected by outliers than standardization; see the sketch after this list.
2. Imputer has been replaced by SimpleImputer.
3. In a numpy array, missing values can be written as np.nan or np.NaN; both are of type float.
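A minimal sketch of note 1 (the toy data are made up for illustration): watch how the scaled value of an ordinary sample, 5.0, moves once a single outlier is appended to the column.
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
x = np.arange(1, 11, dtype=float).reshape(-1, 1)   # ten ordinary samples: 1.0 .. 10.0
x_out = np.vstack([x, [[1000.0]]])                 # the same samples plus one extreme outlier
# the min-max value of 5.0 collapses from ~0.44 to ~0.004 once the outlier sets the new max
print(MinMaxScaler().fit_transform(x)[4], MinMaxScaler().fit_transform(x_out)[4])
# its standardized value only shifts from ~-0.17 to ~-0.32, since the mean/std absorb the outlier better
print(StandardScaler().fit_transform(x)[4], StandardScaler().fit_transform(x_out)[4])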