import pandas as pd
import numpy as np
# Data preprocessing ---------------------------------------------------------------------------------------------------
DATA = pd.read_csv("titanic_train.csv")
print(DATA.describe())
DATA["Age"] = DATA["Age"].fillna(DATA["Age"].mean())#fillna()函数的用法
DATA.loc[DATA["Sex"] == "male", "Sex"] = 0#str变int
DATA.loc[DATA["Sex"] == "female", "Sex"] = 1
DATA["Embarked"] = DATA["Embarked"].fillna("S")
DATA.loc[DATA["Embarked"] == "S", "Embarked"] = 0
DATA.loc[DATA["Embarked"] == "C", "Embarked"] = 1
DATA.loc[DATA["Embarked"] == "Q", "Embarked"] = 2
# Linear regression prediction -----------------------------------------------------------------------------------------
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
alg = LinearRegression()
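# LinearRegression is used as a makeshift classifier here: its continuous output is thresholded at 0.5 further below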
kf = KFold(n_splits=3, shuffle=True, random_state=1)  # random_state only takes effect with shuffle=True in current scikit-learn
predictions = []
for train, test in kf.split(DATA):
    train_predictors = DATA[predictors].iloc[train, :]
    train_target = DATA["Survived"].iloc[train]
    alg.fit(train_predictors.values, train_target.values)
    test_predictions = alg.predict(DATA[predictors].iloc[test, :].values)
    predictions.append(test_predictions)
predictions = np.concatenate(predictions, axis=0)
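# binarize the regression output at the 0.5 threshold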
predictions[predictions > .5] = 1
predictions[predictions <= .5] = 0
correct = [int(a == b) for a, b in zip(predictions, DATA["Survived"])]
accuracy = sum(correct) / len(predictions)
print(accuracy)
# Logistic regression prediction ---------------------------------------------------------------------------------------
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
alg = LogisticRegression(random_state=1, max_iter=1000)  # raise max_iter so the default lbfgs solver converges
scores = cross_val_score(alg, DATA[predictors], DATA["Survived"], cv=3)
print(scores)
print(scores.mean())
from sklearn.ensemble import RandomForestClassifier
alg = RandomForestClassifier(random_state=1, n_estimators=10, min_samples_split=2, min_samples_leaf=1)
kf = KFold(n_splits=3, shuffle=True, random_state=1)
scores = cross_val_score(alg, DATA[predictors], DATA["Survived"], cv=kf)
print(scores)
print(scores.mean())
alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=4, min_samples_leaf=2)
kf = KFold(n_splits=3, shuffle=True, random_state=1)
scores = cross_val_score(alg, DATA[predictors], DATA["Survived"], cv=kf)
print(scores)
print(scores.mean())
DATA["FamilySize"] = DATA["SibSp"] + DATA["Parch"]
DATA["NameLength"] = DATA["Name"].apply(lambda x: len(x))
# Two new features added (FamilySize, NameLength) ----------------------------------------------------------------------
import re
def get_title(name):
    # extract the honorific that precedes a period in the name, e.g. "Mr", "Mrs"
    title_search = re.search(r"([A-Za-z]+)\.", name)
    if title_search:
        return title_search.group(1)
    return ""
titles = DATA["Name"].apply(get_title)
print(titles.value_counts())
title_map = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4}
for k, v in title_map.items():
    titles[titles == k] = v
def title_change(name):
    # any title not in title_map is still a string at this point; map it to 0
    if isinstance(name, str):
        return 0
    else:
        return name
titles = titles.apply(title_change)
DATA["Title"] = titles
# Feature selection ----------------------------------------------------------------------------------------------------
from sklearn.feature_selection import SelectKBest, f_classif
import matplotlib.pyplot as plt
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked", "FamilySize", "Title", "NameLength"]
selector = SelectKBest(f_classif, k=6)
selector.fit(DATA[predictors], DATA["Survived"])
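# convert each feature's p-value to -log10(p): taller bars mean a stronger association with survival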
scores = -np.log10(selector.pvalues_)
plt.bar(range(len(predictors)), scores)
plt.xticks(range(len(predictors)), predictors, rotation="vertical")
plt.show()
# Random forest --------------------------------------------------------------------------------------------------------
kf = KFold(n_splits=3, shuffle=True, random_state=1)
alg = RandomForestClassifier(random_state=1, n_estimators=30, min_samples_split=8, min_samples_leaf=4)
scores = 0
for train, test in kf.split(DATA):
    X_train = DATA[predictors].iloc[train, :]
    Y_train = DATA["Survived"].iloc[train]
    X_test = DATA[predictors].iloc[test, :]
    alg.fit(X_train, Y_train)
    scores += alg.score(X_test, DATA["Survived"].iloc[test])
print(scores / 3)
# Ensemble prediction --------------------------------------------------------------------------------------------------
from sklearn.ensemble import GradientBoostingClassifier
algorithms = [
    [GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3),
     ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title"]],
    [LogisticRegression(random_state=1, max_iter=1000),
     ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]]
]
print(type(algorithms))
kf = KFold(n_splits=3, shuffle=True, random_state=1)
predictions = []
for train, test in kf.split(DATA):
    full_test_predictions = []
    y_train = DATA["Survived"].iloc[train]
    for alg, predictors in algorithms:  # note: unpacking pulls both items out of each inner list
        X_train = DATA[predictors].iloc[train, :]
        alg.fit(X_train, y_train)
        X_test = DATA[predictors].iloc[test, :]
        # predict_proba returns per-class probabilities; column 1 is P(Survived=1)
        test_predictions = alg.predict_proba(X_test.astype(float))[:, 1]
        full_test_predictions.append(test_predictions)
    # average the two models' probabilities, then threshold at 0.5
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    test_predictions[test_predictions <= .5] = 0
    test_predictions[test_predictions > .5] = 1
    predictions.append(test_predictions)
predictions = np.concatenate(predictions, axis=0)
correct = [int(a == b) for a, b in zip(predictions, DATA["Survived"])]
accuracy = sum(correct) / len(predictions)
print(accuracy)
Note on indexing: a DataFrame is indexed by row and column together, e.g. DATA.loc[DATA["Embarked"] == "S", "Embarked"] specifies both the rows and the column.
A Series can be assigned through a boolean mask directly, e.g. titles[titles == k] = v.
To take only some rows of a single DataFrame column, y_train = DATA["Survived"].iloc[train] is enough; once the column is selected, no column name is needed.
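A minimal sketch of the three access patterns on a toy frame (the data below is made up for illustration, not taken from titanic_train.csv):
import pandas as pd
toy = pd.DataFrame({"Embarked": ["S", "C", "S"], "Survived": [0, 1, 1]})
# DataFrame: boolean row mask plus an explicit column label
toy.loc[toy["Embarked"] == "S", "Embarked"] = 0
# Series: a boolean mask alone is enough for direct assignment
s = pd.Series(["Mr", "Miss", "Mr"])
s[s == "Mr"] = 1
# one column, selected rows: pick the column first, then .iloc the row positions
subset = toy["Survived"].iloc[[0, 2]]
print(toy)
print(s)
print(subset)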