实验目的
1. 掌握数据预处理方法；
2. 通过参数寻优，构造最优分类模型。
实验内容
1. 基于泰坦尼克号数据集,构建一个可以根据乘客个人信息,如性别、年龄、船舱等级等来推测乘客是否生存的分类模型。要求:
(1)在学习过的分类算法中至少选择3种分别进行模型的选择、训练和预测;
(2)使用Scikit-learn提供的GridSearchCV函数进行参数寻优;
(3)test.csv文件中没有标签列,可以只使用训练数据集文件train.csv进行
训练集和测试集的划分、训练、测试与评价;
(4)将数据集test.csv的预测结果上传到Kaggle平台进行检验的同学，请在WORD文档中说明并截图，会有额外加分。
2. 代码
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
# Load the Titanic train/test CSVs (absolute paths from the lab environment).
train_data = pd.read_csv(r'F:\File\大三下\机器学习\监督学习综合实验\titanic_train.csv')
test_data = pd.read_csv(r'F:\File\大三下\机器学习\监督学习综合实验\titanic_test.csv')

# Stack the training rows (label removed) on top of the test rows so that
# both go through one consistent preprocessing step.
full_data = pd.concat([train_data.drop(columns='Survived'), test_data], axis=0)

# Drop identifier-like columns that carry no generalizable signal;
# keep the label vector separately.
X = full_data.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'])
y_train = train_data['Survived']

# Column groups consumed by the ColumnTransformer below.
categorical_features = ['Sex', 'Embarked']
numeric_features = ['Age', 'Fare', 'Pclass', 'SibSp', 'Parch']
# Numeric columns: fill missing values with the median, then standardize.
num_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler()),
])

# Categorical columns: fill missing values with the mode, then one-hot encode;
# handle_unknown='ignore' keeps transform safe on unseen categories.
cat_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore')),
])

# Route each column group through its matching pipeline.
preprocessor = ColumnTransformer([
    ('num', num_pipe, numeric_features),
    ('cat', cat_pipe, categorical_features),
])
# Fit the preprocessor on the TRAINING rows only, then apply the fitted
# transform to the test rows. The original code called fit_transform on the
# combined train+test frame, which leaks test-set statistics (imputation
# medians/modes, scaler mean/std) into the model — a data-leakage bug.
n_train = len(train_data)
X_train = preprocessor.fit_transform(X.iloc[:n_train])
X_test = preprocessor.transform(X.iloc[n_train:])

# Hold out 20% of the labelled data for validation (fixed seed for
# reproducibility).
X_train, X_val, y_train, y_val = train_test_split(
    X_train, y_train, test_size=0.2, random_state=42)
# Candidate models. random_state=42 makes results reproducible (consistent
# with the seed used for train_test_split); max_iter=1000 gives
# LogisticRegression enough iterations to converge (the default of 100
# frequently triggers ConvergenceWarning on this data).
classifiers = {
    'Random Forest': RandomForestClassifier(random_state=42),
    'SVM': SVC(random_state=42),
    'Logistic Regression': LogisticRegression(max_iter=1000, random_state=42),
}

# Hyper-parameter grids; keys carry the 'classifier__' prefix because each
# estimator is wrapped in a Pipeline step named 'classifier' below.
param_grids = {
    'Random Forest': {'classifier__n_estimators': [100, 200, 300],
                      'classifier__max_depth': [5, 10, 15]},
    'SVM': {'classifier__C': [0.1, 1, 10],
            'classifier__gamma': [0.1, 0.01, 0.001],
            'classifier__kernel': ['rbf']},
    'Logistic Regression': {'classifier__C': [0.1, 1, 10],
                            'classifier__penalty': ['l2']},
}
# For each candidate, wrap the estimator in a one-step pipeline and run a
# 5-fold cross-validated grid search over its parameter grid.
results = {}
for name, estimator in classifiers.items():
    search = GridSearchCV(
        Pipeline(steps=[('classifier', estimator)]),
        param_grids[name],
        cv=5,
    )
    search.fit(X_train, y_train)
    results[name] = search

# Report every model's best hyper-parameters and cross-validation score.
for name, search in results.items():
    print(f"{name}: Best parameters - {search.best_params_}, Best score - {search.best_score_}")
# Pick the model with the highest cross-validated score. The original code
# hard-coded results['Random Forest'] here, contradicting the "use the best
# model" intent whenever another classifier scored higher.
best_name = max(results, key=lambda name: results[name].best_score_)
best_clf = results[best_name].best_estimator_

# Evaluate the selected model on the held-out validation split.
y_pred = best_clf.predict(X_val)
accuracy = accuracy_score(y_val, y_pred)
print(f"Validation Accuracy: {accuracy}")

# Predict the unlabeled test set and write a Kaggle-style submission file.
y_test_pred = best_clf.predict(X_test)
submission = pd.DataFrame({'PassengerId': test_data['PassengerId'], 'Survived': y_test_pred})
submission.to_csv('submission.csv', index=False)