Data analysis and prediction of Titanic passenger survival (detailed)

# Load the data with pandas
import pandas as pd
titanic = pd.read_csv("titanic_train.csv")
print(titanic.head(5))
# The Age column has missing values; fill them with the median via fillna
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
print(titanic.describe())
# Convert string-valued columns to numeric codes for modeling
print(titanic["Sex"].unique())
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1
print(titanic["Embarked"].unique())
titanic["Embarked"]=titanic["Embarked"].fillna('S')
titanic.loc[titanic["Embarked"]=="S","Embarked"]=0
titanic.loc[titanic["Embarked"]=="C","Embarked"]=1
titanic.loc[titanic["Embarked"]=="Q","Embarked"]=2
# Train a linear regression with cross-validation
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
# The features used for prediction
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
alg = LinearRegression()  # the linear regression model
# 3-fold cross-validation: each fold takes a turn as the validation set while the rest trains.
# Folds are contiguous (no shuffling), so the concatenated out-of-fold predictions stay in row order.
kf = KFold(n_splits=3)
predictions = []
for train, test in kf.split(titanic):
    train_predictors = titanic[predictors].iloc[train, :]  # training features for this fold
    train_target = titanic["Survived"].iloc[train]  # training labels for this fold
    alg.fit(train_predictors, train_target)  # fit the linear regression on this fold
    test_predictions = alg.predict(titanic[predictors].iloc[test, :])
    predictions.append(test_predictions)
# We need 0/1 labels; the regression output is continuous, so threshold at 0.5
import numpy as np
predictions = np.concatenate(predictions, axis=0)
predictions[predictions > .5] = 1
predictions[predictions <= .5] = 0
# Accuracy: the fraction of predictions that match the true labels
accuracy = sum(predictions == titanic["Survived"]) / len(predictions)
print(accuracy)
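# As an aside, sklearn's cross_val_predict collapses the manual fold loop above into a
# single call; it returns the out-of-fold predictions in row order (a sketch, not part
# of the original walkthrough):
from sklearn.model_selection import cross_val_predict
oof = cross_val_predict(LinearRegression(), titanic[predictors], titanic["Survived"], cv=3)
print(((oof > .5).astype(int) == titanic["Survived"]).mean())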
# Train a logistic regression
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
# Initialize our algorithm
alg = LogisticRegression(random_state=1)
# Compute the accuracy score for all the cross validation folds.  (much simpler than what we did before!)
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
# Take the mean of the scores (because we have one for each fold)
print(scores.mean())
# Preprocess the test set in the same way
titanic_test = pd.read_csv("test.csv")
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0 
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")

titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
# Random forest: a good first-choice model; it ensembles many simple decision trees,
# each trained on a random subset of the samples and features.
from sklearn.model_selection import KFold, cross_val_score
from sklearn.ensemble import RandomForestClassifier

predictors=["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked"]
# Parameters: 10 trees, a node needs at least 2 samples to split, and each leaf keeps
# at least 1 sample; together these limit how deep the trees can grow.
alg = RandomForestClassifier(random_state=1, n_estimators=10, min_samples_split=2, min_samples_leaf=1)
kf = KFold(n_splits=3)
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
print(scores.mean())
# Tune the parameters: more trees and more conservative splits
alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=4, min_samples_leaf=2)
kf = KFold(n_splits=3)
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
print(scores.mean())
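# Rather than tweaking parameters by hand, GridSearchCV can try the combinations
# systematically. A minimal sketch; the parameter ranges below are illustrative
# assumptions, not values from the original walkthrough:
from sklearn.model_selection import GridSearchCV
param_grid = {
    "n_estimators": [10, 50, 100],
    "min_samples_split": [2, 4, 8],
    "min_samples_leaf": [1, 2, 4],
}
grid = GridSearchCV(RandomForestClassifier(random_state=1), param_grid, cv=3)
grid.fit(titanic[predictors], titanic["Survived"])
print(grid.best_params_, grid.best_score_)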
# Engineer new features: family size and name length
# Generating a familysize column
titanic["FamilySize"] = titanic["SibSp"] + titanic["Parch"]
# The .apply method generates a new series
titanic["NameLength"] = titanic["Name"].apply(lambda x: len(x))
# The name itself also carries signal (the title); extract it
import re
# A function to get the title from a name.
def get_title(name):
    # Use a regular expression to search for a title.  Titles always consist of capital and lowercase letters, and end with a period.
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    # If the title exists, extract and return it.
    if title_search:
        return title_search.group(1)
    return ""
# Get all the titles and print how often each one occurs.
titles = titanic["Name"].apply(get_title)
print(titles.value_counts())

# Map each title to an integer.  Some titles are very rare, and are compressed into the same codes as other titles.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6, "Major": 7, "Col": 7, "Mlle": 8, "Mme": 8, "Don": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10, "Sir": 9, "Capt": 7, "Ms": 2}
for k,v in title_mapping.items():
    titles[titles == k] = v

# Verify that we converted everything.
print(titles.value_counts())

# Add in the title column.
titanic["Title"] = titles
# Univariate feature selection: SelectKBest scores each feature on its own with an
# ANOVA F-test (f_classif) against the target; a low p-value (high score) marks an
# informative feature.
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif
import matplotlib.pyplot as plt
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked", "FamilySize", "Title", "NameLength"]

# Perform feature selection
selector = SelectKBest(f_classif, k=5)
selector.fit(titanic[predictors], titanic["Survived"])

# Get the raw p-values for each feature, and transform from p-values into scores
scores = -np.log10(selector.pvalues_)

# Plot the scores.  See how "Pclass", "Sex", "Title", and "Fare" are the best?
plt.bar(range(len(predictors)), scores)
plt.xticks(range(len(predictors)), predictors, rotation='vertical')
plt.show()
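# The same ranking printed numerically, highest score first (a small convenience
# addition, not in the original):
for name, score in sorted(zip(predictors, scores), key=lambda t: -t[1]):
    print(name, score)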

# Pick only the four best features.
predictors = ["Pclass", "Sex", "Fare", "Title"]

alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=8, min_samples_leaf=4)
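# The classifier above is defined but never evaluated; a quick check with the same
# 3-fold cross-validation (a sketch added for completeness):
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
print(scores.mean())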
# Ensemble several different models and average their predictions to improve accuracy;
# this helps most when the models are reasonably diverse.
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np

# The algorithms we want to ensemble.
# We're using the more linear predictors for the logistic regression, and everything with the gradient boosting classifier.
algorithms = [
    [GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3), ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title",]],
    [LogisticRegression(random_state=1), ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]]
]

# Initialize the cross validation folds
kf = KFold(n_splits=3)

predictions = []
for train, test in kf.split(titanic):
    train_target = titanic["Survived"].iloc[train]
    full_test_predictions = []
    # Make predictions for each algorithm on each fold
    for alg, predictors in algorithms:
        # Fit the algorithm on the training data.
        alg.fit(titanic[predictors].iloc[train,:], train_target)
        # Select and predict on the test fold.  
        # The .astype(float) is necessary to convert the dataframe to all floats and avoid an sklearn error.
        test_predictions = alg.predict_proba(titanic[predictors].iloc[test,:].astype(float))[:,1]
        full_test_predictions.append(test_predictions)
    # Use a simple ensembling scheme -- just average the predictions to get the final classification.
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    # Any value over .5 is assumed to be a 1 prediction, and below .5 is a 0 prediction.
    test_predictions[test_predictions <= .5] = 0
    test_predictions[test_predictions > .5] = 1
    predictions.append(test_predictions)

# Put all the predictions together into one array.
predictions = np.concatenate(predictions, axis=0)

# Compute accuracy by comparing to the training data.
accuracy = sum(predictions == titanic["Survived"]) / len(predictions)
print(accuracy)
# Give the two models different weights in the ensemble.
predictors = ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title"]

algorithms = [
    [GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3), predictors],
    [LogisticRegression(random_state=1), ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]]
]

full_predictions = []
for alg, predictors in algorithms:
    # Fit the algorithm using the full training data.
    alg.fit(titanic[predictors], titanic["Survived"])
    # Predict using the test dataset.  We have to convert all the columns to floats to avoid an error.
    predictions = alg.predict_proba(titanic_test[predictors].astype(float))[:,1]
    full_predictions.append(predictions)

# The gradient boosting classifier generates better predictions, so we weight it higher.
predictions = (full_predictions[0] * 3 + full_predictions[1]) / 4
print(predictions)
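# To turn the weighted probabilities into a Kaggle submission file (a sketch; it
# assumes the standard Kaggle test.csv, which includes a PassengerId column):
predictions[predictions <= .5] = 0
predictions[predictions > .5] = 1
submission = pd.DataFrame({
    "PassengerId": titanic_test["PassengerId"],
    "Survived": predictions.astype(int)
})
submission.to_csv("kaggle.csv", index=False)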