数据标签二值化
类似于编码。
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
# One-hot encode a single nominal (categorical) feature.
features = np.array([["Texas"],
                     ["California"],
                     ["Texas"],
                     ["Delaware"],
                     ["Texas"]])
# LabelBinarizer gives each distinct class its own 0/1 column.
one_hot = LabelBinarizer()
# Fit and encode in one pass; the original called fit_transform twice,
# discarding the first result and redoing the work for the print.
encoded = one_hot.fit_transform(features)
print(encoded)
如果一个样本可以同时属于多个类别(即 feature 中一个向量含有多个标签值),则需要使用 MultiLabelBinarizer。
手动对标签加数值
# Encode an ordinal feature by hand: map each category to its rank.
dataframe = pd.DataFrame({"Score": ["Low", "Low", "Medium", "Medium", "High"]})
# Explicit ordering — the encoding must preserve Low < Medium < High.
scale_mapper = {"Low": 1,
                "Medium": 2,
                "High": 3}
# Use .map for an exhaustive dict mapping: same output as .replace here,
# but without the object-dtype downcasting FutureWarning that
# Series.replace emits on pandas >= 2.2.
print(dataframe["Score"].map(scale_mapper))
字典二值化
# Vectorize a list of dicts: every distinct key becomes one feature column.
from sklearn.feature_extraction import DictVectorizer

color_counts = [{"Red": 2, "Blue": 4},
                {"Red": 4, "Blue": 3},
                {"Red": 1, "Yellow": 2},
                {"Red": 2, "Yellow": 2}]
# sparse=False returns a dense ndarray instead of the default sparse matrix.
vectorizer = DictVectorizer(sparse=False)
features = vectorizer.fit_transform(color_counts)
print(features)
处理有缺失值的数据
使用KNN对其进行预测。
# Impute a missing categorical value (column 0) by predicting it with KNN
# from the two numeric columns.
from sklearn.neighbors import KNeighborsClassifier

# Fully-observed rows: column 0 holds the known class label.
x = np.array([[0, 2.1, 1.45],
              [1, 1.18, 1.33],
              [0, 1.22, 1.27],
              [1, -0.21, -1.19]])
# Rows whose class label is missing.
x_with_nan = np.array([[np.nan, 0.87, 1.31],
                       [np.nan, -0.67, -0.22]])
# 3 nearest neighbours; 'distance' weights closer points more heavily
# ('uniform' would give every neighbour equal weight).
clf = KNeighborsClassifier(3, weights='distance')
# fit returns the estimator itself, so no separate alias is needed.
clf.fit(x[:, 1:], x[:, 0])
# Predict the missing class labels from the numeric features.
imputed = clf.predict(x_with_nan[:, 1:])
# Re-attach the predicted column in front of the numeric features.
x_with_imputed = np.hstack((imputed.reshape(-1, 1), x_with_nan[:, 1:]))
# Stack the imputed rows on top of the fully-observed ones.
print(np.vstack((x_with_imputed, x)))