import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
import jieba
"""数据的特征抽取(对文本等数据进行特征值化),特征预处理,特征降维"""
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
"""特征抽取"""
# 特征抽取的目的是将特征转换为计算机能够理解的数字
# one-hot编码 用于对离散特征进行编码
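# Before the DictVectorizer demo below, a minimal sketch of what one-hot encoding
# itself does, using pandas (already imported above); pd.get_dummies is one common
# way to one-hot encode a categorical column:
def onehot_demo():
    """One-hot encode a small categorical series (illustrative sketch)."""
    cities = pd.Series(['北京', '上海', '深圳'])
    # One 0/1 column per distinct category value
    print(pd.get_dummies(cities))
    return None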
def dictvec():
    """Feature extraction from dictionaries"""
    # Instantiate the vectorizer
    vect_1 = DictVectorizer(sparse=False)  # sparse=False returns a dense array instead of a sparse matrix
    # Call fit_transform
    data = vect_1.fit_transform([{'city': '北京', 'temperature': 100},
                                 {'city': '上海', 'temperature': 60},
                                 {'city': '深圳', 'temperature': 30}])
    print(vect_1.get_feature_names_out())
    print(data)
    return None
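# For reference, the expected output of dictvec() (assuming scikit-learn's usual
# behavior of ordering the one-hot columns by sorted feature name):
# ['city=上海' 'city=北京' 'city=深圳' 'temperature']
# [[  0.   1.   0. 100.]
#  [  1.   0.   0.  60.]
#  [  0.   0.   1.  30.]]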
# CountVectorizer turns text into count features; its default tokenizer ignores single-character tokens (e.g. single letters)
def countvec():
    """Feature extraction from text"""
    # Instantiate the vectorizer
    cv = CountVectorizer()
    # Call fit_transform
    data = cv.fit_transform(["life is short,i like python", "life is too long,i dislike python"])
    print(cv.get_feature_names_out())
    print(data.toarray())  # fit_transform returns a sparse matrix; toarray() makes it dense for printing
    return None
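# For reference, the expected output of countvec() ("i" is dropped by the default
# tokenizer because it is a single character; the vocabulary is sorted alphabetically):
# ['dislike' 'is' 'life' 'like' 'long' 'python' 'short' 'too']
# [[0 1 1 1 0 1 1 0]
#  [1 1 1 0 1 1 0 1]]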
# jieba segmentation: splits Chinese text into words; the words are then joined with spaces so CountVectorizer can tokenize them
def cutword():
    """Segment Chinese sentences for feature extraction"""
    a = jieba.cut("我爱北京天安门")      # "I love Beijing Tiananmen"
    b = jieba.cut("生活像一把无情刻刀")  # "Life is like a merciless carving knife"
    c = jieba.cut("我不喜欢日本和服")    # "I don't like Japanese kimonos"
    # jieba.cut returns generators; convert them to lists
    a_1 = list(a)
    b_1 = list(b)
    c_1 = list(c)
    # Join the words into space-separated strings
    a_2 = " ".join(a_1)
    b_2 = " ".join(b_1)
    c_2 = " ".join(c_1)
    return a_2, b_2, c_2
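# A slightly tighter variant of cutword (a sketch): jieba.lcut returns a list
# directly, so the intermediate list() step can be skipped.
def cutword_lcut():
    """Same result as cutword(), via jieba.lcut."""
    sentences = ["我爱北京天安门", "生活像一把无情刻刀", "我不喜欢日本和服"]
    a_2, b_2, c_2 = (" ".join(jieba.lcut(s)) for s in sentences)
    return a_2, b_2, c_2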
def hanzivec():
    """Feature extraction from Chinese text"""
    a_2, b_2, c_2 = cutword()
    print(a_2, b_2, c_2)
    cv = CountVectorizer()
    data = cv.fit_transform([a_2, b_2, c_2])
    print(cv.get_feature_names_out())
    print(data.toarray())
    return None
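# Note: CountVectorizer's default token_pattern is r"(?u)\b\w\w+\b", so tokens of
# length 1 are dropped; for Chinese this silently discards single-character words
# such as 我 or 爱. A sketch of how to keep them, if desired:
# cv = CountVectorizer(token_pattern=r"(?u)\b\w+\b")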
# TF-IDF turns text into weighted features. A common textbook form:
#   tf = (occurrences of the term in the document) / (total terms in the document)
#   idf = log(total documents / (documents containing the term + 1))
#   tf-idf = tf * idf
# (scikit-learn's TfidfVectorizer uses a smoothed variant of idf and L2-normalizes each row by default)
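# Before the scikit-learn version below, a hand-rolled sketch of the textbook
# formula above (illustrative only; its numbers will not match TfidfVectorizer's
# exactly, since scikit-learn smooths idf and L2-normalizes each row):
def tfidf_manual(docs):
    """Compute textbook tf-idf for a list of space-separated documents."""
    tokenized = [d.split() for d in docs]
    vocab = sorted({w for doc in tokenized for w in doc})
    n_docs = len(tokenized)
    # df[w]: number of documents containing the word w
    df = {w: sum(w in doc for doc in tokenized) for w in vocab}
    rows = []
    for doc in tokenized:
        total = len(doc)
        rows.append([doc.count(w) / total * np.log(n_docs / (df[w] + 1)) for w in vocab])
    return vocab, np.array(rows)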
def tfidfvec():
    """TF-IDF feature extraction from Chinese text"""
    a_2, b_2, c_2 = cutword()
    print(a_2, b_2, c_2)
    tv = TfidfVectorizer()
    data = tv.fit_transform([a_2, b_2, c_2])
    print(tv.get_feature_names_out())
    print(data.toarray())
    return None

if __name__ == '__main__':
    dictvec()
    countvec()
    hanzivec()
    tfidfvec()