近期学习并做了个机器学习实战的小项目,开发环境为 Jupyter Notebook,基于 scikit-learn

本文通过使用随机森林回归模型对房价进行预测,详细介绍了数据预处理、特征工程和模型训练的过程。通过对数据集的深入分析,采用特征组合、缺失值处理和标准化等手段,提高了模型的预测准确性。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt #plotting toolkit
from pandas.plotting import scatter_matrix #scatter-matrix plotting helper
# IPython magic: render matplotlib figures inline (notebook only; not valid in a plain .py file)
%matplotlib inline
housing_data = pd.read_csv("datasets/housing/housing.csv")
# Exploratory helpers, commented out after initial inspection:
# print(housing_data.head())
# print(housing_data.info())
# print(housing_data["ocean_proximity"].value_counts()) # inspect the categorical values
# print(housing_data.describe())
'''housing_data.hist(bins=50, figsize=(20, 15))
plt.show()'''
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"] #attributes whose pairwise relationships we want to inspect
scatter_matrix(housing_data[attributes], figsize=(12, 8)) # scatter matrix of the selected attributes; median_house_value visibly correlates with median_income
plt.show()

在这里插入图片描述

# Zoom in on the strongest correlation: house value vs income (alpha=0.1 reveals point density).
housing_data.plot(kind="scatter",x="median_income",y="median_house_value",alpha=0.1) #plot median_house_value against median_income

在这里插入图片描述

# Geographic scatter: longitude/latitude as coordinates, marker radius = population/100,
# color = median_house_value via the "jet" colormap so price differences stand out.
housing_data.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
                      s=housing_data["population"]/100, label="population",
                      c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True)

plt.legend()
plt.show() #population-style map: size shows population, color shows house price

在这里插入图片描述

import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit  # stratified train/test splitting

# Build an income category attribute for stratification: divide by 1.5 to limit
# the number of categories, then round up to get discrete bins.
housing_data["income_cat"] = np.ceil(housing_data["median_income"]/1.5)
# Cap the categories at 5 (merge all higher incomes into category 5 so no
# stratum is too small). NOTE: plain assignment replaces the original
# where(..., inplace=True), which is deprecated in modern pandas and raises
# chained-assignment warnings; the resulting column is identical.
housing_data["income_cat"] = housing_data["income_cat"].where(
    housing_data["income_cat"] < 5, 5.0)
# One stratified 80/20 split; fixed random_state keeps the split reproducible.
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing_data, housing_data["income_cat"]):
    strat_train_set = housing_data.loc[train_index]  # rows picked for training
    strat_test_set = housing_data.loc[test_index]    # held-out test rows
housing_data = housing_data = strat_train_set.drop("median_house_value",axis=1)  # features only (label removed)
housing_label = strat_train_set["median_house_value"].copy()  # target labels used for fitting


# Three alternative strategies for the missing values in "total_bedrooms"
# (illustrative only -- none of these results is used later; the pipeline
# below relies on SimpleImputer instead).
# NOTE(review): the original snippet referenced an undefined name `housing`
# and dropped NAs from "total_rooms" (which has none); both fixed here, and
# each result is captured so the demonstration actually shows its effect.
option1 = housing_data.dropna(subset=["total_bedrooms"])   # 1. drop rows whose total_bedrooms is NA
option2 = housing_data.drop("total_bedrooms",axis=1)       # 2. drop the whole total_bedrooms column
median = housing_data["total_bedrooms"].median()           # 3. fill NAs with the column median
option3 = housing_data["total_bedrooms"].fillna(median)

在这里插入图片描述

from sklearn.impute import SimpleImputer # imputation transformer used to fill missing values
# Use a dedicated instance name: the original rebound the class name itself
# (SimpleImputer = SimpleImputer(...)), shadowing the class and making it
# unusable for the rest of the module.
imputer = SimpleImputer(strategy = "median") # fill NAs with the column median
housing_sum=housing_data.drop(["ocean_proximity"],axis=1) # numeric columns only (drop the text column), kept in housing_sum

imputer.fit(housing_sum) # learn the per-column medians from housing_sum
imputer.statistics_  # the learned medians, one per numeric column

X = imputer.transform(housing_sum) # the usual fit/transform pair: transform returns a plain ndarray
housing_tr=pd.DataFrame(X,columns=housing_sum.columns) # back to a DataFrame (same columns as housing_sum) for convenience
from sklearn.preprocessing import LabelEncoder # imported but unused -- superseded by LabelBinarizer below
from sklearn.preprocessing import LabelBinarizer # text -> integer -> one-hot encoding in one step
encoder = LabelBinarizer(sparse_output=True) # sparse_output=True yields a sparse matrix
housing_cat = housing_data["ocean_proximity"]
housing_cat_1hot = encoder.fit_transform(housing_cat) # one-hot encode the ocean_proximity categories
housing_cat_1hot
from sklearn.base import BaseEstimator,TransformerMixin #base classes: get_params/set_params and a free fit_transform
# Column indices (within the numeric array) of the attributes combined below.
rooms_ix,bedrooms_ix,population_ix,household_ix = 3,4,5,6 # corresponding column positions

class CombinedAttributesAdder(BaseEstimator,TransformerMixin):
    """Append derived ratio features to the numeric housing array.

    Always adds rooms_per_household and population_per_household; when
    add_bedrooms_per_room is True (the default, which tends to help the
    model) it also adds bedrooms_per_room.
    """
    def __init__(self,add_bedrooms_per_room=True):
        self.add_bedrooms_per_room=add_bedrooms_per_room
    def fit(self,X,y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self,X,y=None):
        rooms_ph = X[:,rooms_ix] / X[:,household_ix]
        pop_ph = X[:,population_ix] / X[:,household_ix]
        # np.c_ stacks the new columns next to X (all pieces share the row count).
        if not self.add_bedrooms_per_room:
            return np.c_[X, rooms_ph, pop_ph]
        bed_pr = X[:,bedrooms_ix] / X[:,rooms_ix]
        return np.c_[X, rooms_ph, pop_ph, bed_pr]
# Demonstrate the transformer without the optional bedrooms_per_room feature.
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs=attr_adder.transform(housing_data.values) # .values yields the underlying ndarray of housing_data

在这里插入图片描述

from sklearn.pipeline import Pipeline # 利用pipeline来运作,用到了pipeline,standardscaler
from sklearn.preprocessing import StandardScaler
from sklearn.base import BaseEstimator,TransformerMixin
class DataFrameSelector(BaseEstimator,TransformerMixin):
    """Select a fixed set of DataFrame columns and return them as an ndarray.

    Lets a Pipeline start from a pandas DataFrame: downstream steps receive
    the .values of the chosen columns only.
    """
    def __init__(self,attribute_names):
        self.attribute_names=attribute_names
    def fit(self,X,y=None):
        # Nothing to learn; the selection is fixed at construction time.
        return self
    def transform(self,X):
        selected = X[self.attribute_names]
        return selected.values
from sklearn.pipeline import FeatureUnion # 属性结合的pipeline
from sklearn.impute import SimpleImputer
from sklearn.base import TransformerMixin #gives fit_transform method for free
class MyLabelBinarizer(TransformerMixin):
    """Pipeline-friendly wrapper around LabelBinarizer.

    LabelBinarizer's fit/transform do not accept a y argument, so it cannot
    sit inside a Pipeline directly; this wrapper accepts (and ignores) y.
    """
    def __init__(self, *args, **kwargs):
        # Forward all constructor arguments to the wrapped encoder.
        self.encoder = LabelBinarizer(*args, **kwargs)
    def fit(self, x, y=0):
        self.encoder.fit(x)
        return self
    def transform(self, x, y=0):
        encoded = self.encoder.transform(x)
        return encoded


num_attribs = list(housing_sum)  # names of the numeric columns
cat_attribs = ["ocean_proximity"]  # the single categorical column

# Numeric branch: select columns -> impute medians -> add ratio features -> standardize.
num_pipeline = Pipeline([
    ('selector',DataFrameSelector(num_attribs)),
    ('imputer', SimpleImputer(strategy="median")),
    ('attribs_adder',CombinedAttributesAdder()),
    ('std_scaler',StandardScaler())
])

# Categorical branch: select the text column -> one-hot encode it.
cat_pipeline = Pipeline([
    ('selector',DataFrameSelector(cat_attribs)),
    ('label_binarizer',MyLabelBinarizer()),
])

# Run both branches and concatenate their outputs column-wise.
full_pipeline=FeatureUnion(transformer_list=[
    ("num_pipeline",num_pipeline),
    ("cat_pipeline",cat_pipeline),
])
housing_prepared = full_pipeline.fit_transform(housing_data) # the fully prepared training matrix produced by the pipeline
from sklearn.ensemble import RandomForestRegressor # random forest regression model
forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared,housing_label)  # train on the prepared features
from sklearn.metrics import mean_squared_error # mean squared error metric
housing_predictions = forest_reg.predict(housing_prepared) # predictions on the training set itself
forest_mse=mean_squared_error(housing_label,housing_predictions) #gap between predictions and the actual values
forest_rmse=np.sqrt(forest_mse)
forest_rmse  # training RMSE (optimistic: evaluated on the data the model was fit on)
from sklearn.model_selection import cross_val_score
scores = cross_val_score(forest_reg,housing_prepared,housing_label,scoring="neg_mean_squared_error",cv=10) # 10-fold cross-validation; sklearn reports negated MSE
rmse_scores=np.sqrt(-scores)  # negate back and take the square root -> per-fold RMSE
def display_scores(scores):
    """Print a cross-validation score summary: raw scores, mean, std-dev."""
    summary = (
        ("Scores", scores),
        ("Mean:", scores.mean()),
        ("Standard deviation:", scores.std()),
    )
    for label, value in summary:
        print(label, value)
display_scores(rmse_scores) # show the per-fold RMSEs plus their mean and spread

在这里插入图片描述

from sklearn.model_selection import GridSearchCV  # exhaustive search for the best hyper-parameters / estimator
# Two grids: the first varies n_estimators/max_features with bootstrapping on,
# the second repeats a smaller sweep with bootstrap disabled.
param_grid = [
    {'n_estimators':[3,10,30],'max_features':[2,4,6,8]},
    {'bootstrap':[False],'n_estimators':[3,10],'max_features':[2,3,4]},
]
forest_reg=RandomForestRegressor()
grid_search=GridSearchCV(forest_reg,param_grid,cv=5,scoring='neg_mean_squared_error')
grid_search.fit(housing_prepared,housing_label)
grid_search.best_params_
grid_search.best_estimator_# the best estimator found
cvres=grid_search.cv_results_
# Print the RMSE obtained for every parameter combination tried.
for mean_score,params in zip(cvres["mean_test_score"],cvres["params"]):
    print(np.sqrt(-mean_score),params)
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
# Pair each importance score with a human-readable feature name.
extra_attribs=["room_per_hhold","pop_per_hhold","bedrooms_per_room"]
cat_one_hot_attribs = list(encoder.classes_)
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances,attributes),reverse=True) #features ranked by importance
# Final evaluation on the held-out test set!
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value",axis=1)  # test features (label removed)
Y_test = strat_test_set["median_house_value"].copy()  # test labels
X_test_prepared=full_pipeline.transform(X_test)  # transform only -- never fit the pipeline on test data
final_predictions=final_model.predict(X_test_prepared)
final_mse=mean_squared_error(Y_test,final_predictions)
final_rmse=np.sqrt(final_mse)
final_rmse ## the final generalization RMSE -- the house-price prediction error!

在这里插入图片描述

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值