# Code you have previously used to load data
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
# Path of the file to read. We changed the directory structure to simplify submitting to a competition
iowa_file_path = r'G:/kaggle/housePrice/train.csv'
home_data = pd.read_csv(iowa_file_path)

# Create target object and call it y
y = home_data.SalePrice
# Create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr',
            'TotRmsAbvGrd', 'MSSubClass', 'OverallQual', 'OverallCond', 'YearRemodAdd',
            'MasVnrArea', 'BsmtFullBath', 'BsmtHalfBath', 'HalfBath', 'KitchenAbvGr',
            'Fireplaces', 'GarageCars', 'GarageArea', 'PoolArea']
X = home_data[features]

# Handle NaN values by imputing the mean of each column
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
X = my_imputer.fit_transform(X)
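Note that SimpleImputer.fit_transform returns a plain NumPy array, so X loses its column names at this point. That is fine for the models below; if you prefer to keep a labelled DataFrame, one optional variant (not required by the rest of the code) is:

# Optional: keep the column labels by wrapping the imputed values back into a DataFrame.
X = pd.DataFrame(my_imputer.fit_transform(home_data[features]), columns=features)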
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)

# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)

# Fit Model
iowa_model.fit(train_X, train_y)

# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_y, val_predictions)
print("Validation MAE when not specifying max_leaf_nodes: {:,.0f}".format(val_mae))
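A value like max_leaf_nodes=100 below is typically found by comparing validation MAE across a few candidate tree sizes. A minimal sketch of such a search (the get_mae helper and the candidate list are illustrative, not part of the original code):

# Sketch: score a handful of candidate tree sizes on the validation split and keep the best.
def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=1)
    model.fit(train_X, train_y)
    preds = model.predict(val_X)
    return mean_absolute_error(val_y, preds)

candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
scores = {size: get_mae(size, train_X, val_X, train_y, val_y) for size in candidate_max_leaf_nodes}
best_tree_size = min(scores, key=scores.get)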
# Using best value for max_leaf_nodes
iowa_model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)
iowa_model.fit(train_X, train_y)
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_y, val_predictions)
print("Validation MAE for best value of max_leaf_nodes: {:,.0f}".format(val_mae))

# Define the model. Set random_state to 1
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(val_y, rf_val_predictions)
print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_val_mae))
Validation MAE when not specifying max_leaf_nodes: 28,365
Validation MAE for best value of max_leaf_nodes: 26,087
Validation MAE for Random Forest Model: 18,974
d:\python27\lib\site-packages\sklearn\ensemble\forest.py:248: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.
"10 in version 0.20 to 100 in 0.22.", FutureWarning)
Create a model: a Random Forest is used.
# To improve accuracy, create a new Random Forest model which you will train on all training data
rf_model_on_full_data = RandomForestRegressor(random_state=1)

# fit rf_model_on_full_data on all data from the training data
rf_model_on_full_data.fit(X, y)
# path to file you will use for predictions
test_data_path = r'G:/kaggle/housePrice/test.csv'

# read test data file using pandas
test_data = pd.read_csv(test_data_path)

# create test_X which comes from test_data but includes only the columns you used for prediction.
# The list of columns is stored in a variable called features
test_X = test_data[features]

# Handle NaN values by imputing the mean of each column
my_imputer = SimpleImputer()
test_X = my_imputer.fit_transform(test_X)
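Note that fit_transform here refits the imputer on the test features, so test-set NaNs are filled with test-set means. A more common pattern (sketched below with illustrative variable names; the code in this section does not do this) is to fit the imputer on the training features and only call transform on the test features:

# Sketch: fill both splits with the training-set column means.
shared_imputer = SimpleImputer()
train_features_filled = shared_imputer.fit_transform(home_data[features])  # fit on training data
test_features_filled = shared_imputer.transform(test_data[features])       # transform only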
# make predictions which we will submit.
test_preds = rf_model_on_full_data.predict(test_X)

# The lines below show you how to save your data in the format needed to score it in the competition
output = pd.DataFrame({'Id': test_data.Id, 'SalePrice': test_preds})
output.to_csv(r'G:/kaggle/housePrice/submission.csv', index=False)
output.head()
     Id  SalePrice
0  1461   121550.8
1  1462   151415.0
2  1463   174114.0
3  1464   175490.0
4  1465   201250.0