Predicting Housing Prices | Hands-On Project (Part 1)

1. Setup

Make sure matplotlib works, and write a helper function to save figures.

# Support both Python 2 and Python 3
from __future__ import division, print_function, unicode_literals

import numpy as np
import os

# Make the notebook's output stable across runs
np.random.seed(42)

# Set up matplotlib and tweak the default label sizes
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "end_to_end_project"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
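# A small safeguard (not in the original post): make sure the images directory
# exists before save_fig() tries to write into it.
os.makedirs(IMAGES_PATH, exist_ok=True)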

def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
	path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
	print("Saving figure", fig_id)
	if tight_layout:
		plt.tight_layout()
	plt.savefig(path, format=fig_extension, dpi=resolution)

# Ignore useless warnings
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")

2. Get the Data

import os
import tarfile
from six.moves import urllib

DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"

def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
	os.makedirs(housing_path, exist_ok=True) # create the target directory
	tgz_path = os.path.join(housing_path, "housing.tgz")
	urllib.request.urlretrieve(housing_url, tgz_path) # download the archive to tgz_path
	housing_tgz = tarfile.open(tgz_path)
	housing_tgz.extractall(path=housing_path) # extract the archive into housing_path
	housing_tgz.close()
fetch_housing_data()

import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
	csv_path = os.path.join(housing_path, "housing.csv")
	return pd.read_csv(csv_path)

housing = load_housing_data()
housing.head()


housing.info()


housing["ocean_proximity"].value_counts()


housing.describe()


%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
save_fig("attribute_histogram_plots")
plt.show()


# Keep the split identical on every run
np.random.seed(42)

import numpy as np
def split_train_test(data, test_ratio):
	shuffled_indices = np.random.permutation(len(data))
	test_set_size = int(len(data) * test_ratio)
	test_indices = shuffled_indices[:test_set_size]
	train_indices = shuffled_indices[test_set_size:]
	return data.iloc[train_indices], data.iloc[test_indices]

train_set, test_set = split_train_test(housing, 0.2)
print(len(train_set), "train +", len(test_set), "test")


from zlib import crc32
def test_set_check(identifier, test_ratio):
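	# crc32 can return a signed value on Python 2, so mask with 0xffffffff to keep
	# the checksum unsigned and comparable to test_ratio * 2**32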
	return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32

def split_train_test_by_id(data, test_ratio, id_column):
	ids = data[id_column]
	in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))
	return data.loc[~in_test_set], data.loc[in_test_set]
import hashlib
def test_set_check(identifier, test_ratio, hash=hashlib.md5):
	return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio

def test_set_check(identifier, test_ratio, hash=hashlib.md5):
	return bytearray(hash(np.int64(identifier)).digest())[-1] < 256 * test_ratio
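
Because the assignment depends only on the hash of the identifier, a given row always lands in the same split, no matter how often the notebook is re-run or how much new data is appended later. A quick sanity check (a sketch; the identifier value is arbitrary):

test_set_check(8675309, 0.2)   # returns the same boolean every time it is called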

housing_with_id = housing.reset_index()   # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")

housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")

test_set.head()


from sklearn.model_selection import train_test_split

train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)

test_set.head()


housing["median_income"].hist()


# Bin median_income into five income categories, labeled 1-5
housing["income_cat"] = pd.cut(housing["median_income"],
				bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
				labels=[1, 2, 3, 4, 5])

housing["income_cat"].value_counts()

housing["income_cat"].hist()

# Stratified sampling based on the income category
from sklearn.model_selection import StratifiedShuffleSplit

split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
	strat_train_set = housing.loc[train_index]
	strat_test_set = housing.loc[test_index]

strat_test_set["income_cat"].value_counts() / len(strat_test_set)


housing["income_cat"].value_counts() / len(housing)


def income_cat_proportions(data):
	return data["income_cat"].value_counts() / len(data)

train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)

compare_props = pd.DataFrame({
	"Overall": income_cat_proportions(housing),
	"Stratified": income_cat_proportions(strat_test_set),
	"Random": income_cat_proportions(test_set),
}).sort_index()
compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100

compare_props


for set_ in (strat_train_set, strat_test_set):
	set_.drop("income_cat", axis=1, inplace=True)

3. Visualize the Data and Explore Relationships

housing = strat_train_set.copy()

housing.plot(kind="scatter", x="longitude", y="latitude")
save_fig("bad_visualization_plot")


housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
save_fig("better_visualization_plot")


housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
	s=housing["population"]/100, label="population", figsize=(10,7),
	c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
	sharex=False)

plt.legend()
save_fig("housing_prices_scatterplot")


import matplotlib.image as mpimg
california_img = mpimg.imread(PROJECT_ROOT_DIR + '/images/end_to_end_project/california.png')
ax = housing.plot(kind="scatter", x="longitude", y="latitude", figsize=(10,7),
	s=housing['population']/100, label="Population",
	c="median_house_value", cmap=plt.get_cmap("jet"),
	colorbar=False, alpha=0.4)
plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5,
	cmap=plt.get_cmap("jet"))
plt.ylabel("Latitude", fontsize=14)
plt.xlabel("Longitude", fontsize=14)

prices = housing["median_house_value"]
tick_values = np.linspace(prices.min(), prices.max(), 11)
cbar = plt.colorbar()
cbar.ax.set_yticklabels(["$%dk"%(round(v/1000)) for v in tick_values], fontsize=14)
cbar.set_label('Median House Value', fontsize=16)

plt.legend(fontsize=16)
save_fig("california_housing_prices_plot")
plt.show()
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)


from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
save_fig("scatter_matrix_plot")


housing.plot(kind="scatter", x="median_income", y="median_house_value",alpha=0.1)
plt.axis([0, 16, 0, 550000])
save_fig("income_vs_house_value_scatterplot")


housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"]=housing["population"]/housing["households"]

corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)


housing.plot(kind="scatter", x="rooms_per_household", y="median_house_value", alpha=0.2)
plt.axis([0, 5, 0, 520000])
plt.show()


housing.describe()


4. Prepare the Data for Machine Learning Algorithms

housing = strat_train_set.drop("median_house_value", axis=1) # drop the labels from the training set
housing_labels = strat_train_set["median_house_value"].copy()

sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()
sample_incomplete_rows


sample_incomplete_rows.dropna(subset=["total_bedrooms"]) # option 1: drop rows with missing values
sample_incomplete_rows.drop("total_bedrooms", axis=1) # option 2: drop the whole attribute


median = housing["total_bedrooms"].median()
sample_incomplete_rows["total_bedrooms"].fillna(median, inplace=True) # option 3
sample_incomplete_rows


try:
	from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
except ImportError:
	from sklearn.preprocessing import Imputer as SimpleImputer

imputer = SimpleImputer(strategy="median")

# Remove the text attribute first, since the median can only be computed on numerical attributes
housing_num = housing.drop('ocean_proximity', axis=1)
# alternatively: housing_num = housing.select_dtypes(include=[np.number])

# fit() computes the median of each column
imputer.fit(housing_num)


# Show the computed median of each column
imputer.statistics_

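As a quick check (a sketch using the variables above), these statistics are simply the per-column medians of the numerical attributes:

housing_num.median().values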

X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=housing.index)

housing_tr.loc[sample_incomplete_rows.index.values]


imputer.strategy


housing_tr = pd.DataFrame(X, columns=housing_num.columns, index=housing_num.index)
housing_tr.head()


housing_cat = housing[['ocean_proximity']]
housing_cat.head(10)


try:
	from sklearn.preprocessing import OrdinalEncoder
except ImportError:
	from future_encoders import OrdinalEncoder # Scikit-Learn < 0.20

ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
housing_cat_encoded[:10]


ordinal_encoder.categories_


try:
	from sklearn.preprocessing import OrdinalEncoder
	from sklearn.preprocessing import OneHotEncoder
except ImportError:
	from future_encoders import OneHotEncoder

cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot


housing_cat_1hot.toarray()


cat_encoder = OneHotEncoder(sparse=False)
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot


cat_encoder.categories_


housing.columns
from sklearn.base import BaseEstimator, TransformerMixin

# Get the column indices of the attributes we want to combine
rooms_ix, bedrooms_ix, population_ix, household_ix = [
	list(housing.columns).index(col)
	for col in ("total_rooms", "total_bedrooms", "population", "households")]

class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
	# Custom transformer that adds the combined attribute columns to the dataset
	def __init__(self, add_bedrooms_per_room = True): # no *args or **kwargs
		self.add_bedrooms_per_room = add_bedrooms_per_room
	def fit(self, X, y=None):
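		# nothing to estimate from the data, so fit() just returns self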
		return self
	def transform(self, X, y=None):
		rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
		population_per_household = X[:, population_ix] / X[:, household_ix]
		if self.add_bedrooms_per_room:
			bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
			return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
		else:
			return np.c_[X, rooms_per_household, population_per_household]

attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)

# Another way to build the attribute-adding step: wrap the function in a FunctionTransformer
from sklearn.preprocessing import FunctionTransformer

def add_extra_features(X, add_bedrooms_per_room=True):
	rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
	population_per_household = X[:, population_ix] / X[:, household_ix]
	if add_bedrooms_per_room:
		bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
		return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
	else:
		return np.c_[X, rooms_per_household, population_per_household]
attr_adder = FunctionTransformer(add_extra_features, validate=False,
				kw_args={"add_bedrooms_per_room": False})
housing_extra_attribs = attr_adder.fit_transform(housing.values)
housing_extra_attribs = pd.DataFrame(
	housing_extra_attribs,
	columns=list(housing.columns) + ["rooms_per_household", "population_per_household"],
	index=housing.index)
housing_extra_attribs.head()


from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Pipeline for preprocessing the numerical attributes
num_pipeline = Pipeline([('imputer', SimpleImputer(strategy="median")),
						('attribs_adder', FunctionTransformer(add_extra_features, validate=False)),
						('std_scaler', StandardScaler()),
						])

housing_num_tr = num_pipeline.fit_transform(housing_num)
housing_num_tr


try:
	from sklearn.compose import ColumnTransformer
except ImportError:
	from future_encoders import ColumnTransformer # Scikit-Learn < 0.20

num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
full_pipeline = ColumnTransformer([
		("num", num_pipeline, num_attribs),
		("cat", OneHotEncoder(), cat_attribs),
	])

housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared


housing_prepared.shape
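
The column count can be sanity-checked (a sketch reusing names defined above): the numerical attributes, plus the three ratio features added by add_extra_features, plus one one-hot column per ocean_proximity category:

len(num_attribs) + 3 + len(full_pipeline.named_transformers_["cat"].categories_[0])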


# An alternative way to build pipelines for the numerical and categorical attributes
from sklearn.base import BaseEstimator, TransformerMixin

class OldDataFrameSelector(BaseEstimator, TransformerMixin):
	def __init__(self, attribute_names):
		self.attribute_names = attribute_names
	def fit(self, X, y=None):
		return self
	def transform(self, X):
		return X[self.attribute_names].values

num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]

old_num_pipeline = Pipeline([
		('selector', OldDataFrameSelector(num_attribs)),
		('imputer', SimpleImputer(strategy="median")),
		('attribs_adder', FunctionTransformer(add_extra_features, validate=False)),
		('std_scaler', StandardScaler()),
	])

old_cat_pipeline = Pipeline([
		('selector', OldDataFrameSelector(cat_attribs)),
		('cat_encoder', OneHotEncoder(sparse=False)),
	])


from sklearn.pipeline import FeatureUnion

old_full_pipeline = FeatureUnion(transformer_list=[
		 ("num_pipeline", old_num_pipeline),
		 ("cat_pipeline", old_cat_pipeline),
	])

old_housing_prepared = old_full_pipeline.fit_transform(housing)
old_housing_prepared


np.allclose(housing_prepared, old_housing_prepared)


5. Select and Train a Model

from sklearn.linear_model import LinearRegression

lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)


some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)

print("Predictions:", lin_reg.predict(some_data_prepared))


print("Labels:", list(some_labels))


some_data_prepared


from sklearn.metrics import mean_squared_error

housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse


from sklearn.metrics import mean_absolute_error

lin_mae = mean_absolute_error(housing_labels, housing_predictions)
lin_mae


from sklearn.tree import DecisionTreeRegressor

tree_reg = DecisionTreeRegressor(random_state=42)
tree_reg.fit(housing_prepared, housing_labels)


housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse


6. Fine-Tune the Model Parameters

from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
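# Scikit-Learn's cross-validation expects a utility function (greater is better),
# so the scorer returns negative MSE; negate it before taking the square root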
tree_rmse_scores = np.sqrt(-scores)

def display_scores(scores):
	print("Scores:", scores)
	print("Mean:", scores.mean())
	print("Standard deviation:", scores.std())

display_scores(tree_rmse_scores)


lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)


from sklearn.ensemble import RandomForestRegressor

forest_reg = RandomForestRegressor(n_estimators=10, random_state=42)
forest_reg.fit(housing_prepared, housing_labels)


housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse


from sklearn.model_selection import cross_val_score 

forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)


scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
pd.Series(np.sqrt(-scores)).describe()


from sklearn.svm import SVR

svm_reg = SVR(kernel="linear")
svm_reg.fit(housing_prepared, housing_labels)
housing_predictions = svm_reg.predict(housing_prepared)
svm_mse = mean_squared_error(housing_labels, housing_predictions)
svm_rmse = np.sqrt(svm_mse)
svm_rmse


from sklearn.model_selection import GridSearchCV
param_grid = [
	# try 12 (3x4) combinations of hyperparameters
	{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
	# then try 6 (2x3) combinations with bootstrap set to False
	{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
]
	
forest_reg = RandomForestRegressor(random_state=42)

# (12 + 6) parameter combinations x 5 CV folds = 90 rounds of training in total
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error', return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)


grid_search.best_params_


grid_search.best_estimator_


cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
	print(np.sqrt(-mean_score), params)


pd.DataFrame(grid_search.cv_results_)


from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint

param_distribs = {
		'n_estimators': randint(low=1, high=200),
		'max_features': randint(low=1, high=8),
	}

forest_reg = RandomForestRegressor(random_state=42)
rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs, n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42)
rnd_search.fit(housing_prepared, housing_labels)


cvres = rnd_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
	print(np.sqrt(-mean_score), params)


feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances


extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
cat_encoder = full_pipeline.named_transformers_["cat"]
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)


final_model = grid_search.best_estimator_

X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()

X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)

final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
final_rmse


from scipy import stats

confidence = 0.95
squared_errors = (final_predictions - y_test) ** 2
mean = squared_errors.mean()
m = len(squared_errors)

np.sqrt(stats.t.interval(confidence, m - 1,
						loc=np.mean(squared_errors),
						scale=stats.sem(squared_errors)))
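
The same interval can be computed manually (a sketch reusing mean and m from above), which makes the t-statistic explicit:

tscore = stats.t.ppf((1 + confidence) / 2, df=m - 1)
tmargin = tscore * squared_errors.std(ddof=1) / np.sqrt(m)
np.sqrt(mean - tmargin), np.sqrt(mean + tmargin)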

