AI+siRNA Drug Efficacy Prediction: Datawhale AI Summer Camp

Code Listing

#%%
import pandas as pd  # pandas for data loading and manipulation
df_original = pd.read_csv("E:/python program/pythonProject1/data/siRNA_0715/train_data.csv")  # read the original training data
n_original = df_original.shape[0]  # number of rows (samples) in the original training data
df_submit = pd.read_csv("E:/python program/pythonProject1/data/siRNA_0715/sample_submission.csv")  # read the sample submission file
df = pd.concat([df_original, df_submit], axis=0).reset_index(drop=True)
# pd.concat() stacks df_original and df_submit row-wise (axis=0) into a single DataFrame df.
# reset_index(drop=True) rebuilds a continuous index and discards the old indices from before the merge.
#%%
def siRNA_feat_builder(s: pd.Series, anti: bool = False):
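    """Build sequence features (length, terminal bases, start/end patterns, GC content) for one siRNA strand."""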
    name = "anti" if anti else "sense"
    df = s.to_frame()
    df[f"feat_siRNA_{name}_seq_len"] = s.str.len()
    for pos in [0, -1]:
        for c in list("AUGC"):
            df[f"feat_siRNA_{name}_seq_{c}_{'front' if pos == 0 else 'back'}"] = (
                s.str[pos] == c
            )
    df[f"feat_siRNA_{name}_seq_pattern_1"] = s.str.startswith("AA") & s.str.endswith(
        "UU"
    )
    df[f"feat_siRNA_{name}_seq_pattern_2"] = s.str.startswith("GA") & s.str.endswith(
        "UU"
    )
    df[f"feat_siRNA_{name}_seq_pattern_3"] = s.str.startswith("CA") & s.str.endswith(
        "UU"
    )
    df[f"feat_siRNA_{name}_seq_pattern_4"] = s.str.startswith("UA") & s.str.endswith(
        "UU"
    )
    df[f"feat_siRNA_{name}_seq_pattern_5"] = s.str.startswith("UU") & s.str.endswith(
        "AA"
    )
    df[f"feat_siRNA_{name}_seq_pattern_6"] = s.str.startswith("UU") & s.str.endswith(
        "GA"
    )
    df[f"feat_siRNA_{name}_seq_pattern_7"] = s.str.startswith("UU") & s.str.endswith(
        "CA"
    )
    df[f"feat_siRNA_{name}_seq_pattern_8"] = s.str.startswith("UU") & s.str.endswith(
        "UA"
    )
    df[f"feat_siRNA_{name}_seq_pattern_9"] = s.str[1] == "A"
    df[f"feat_siRNA_{name}_seq_pattern_10"] = s.str[-2] == "A"
    df[f"feat_siRNA_{name}_seq_pattern_GC_frac"] = (
        s.str.contains("G") + s.str.contains("C")
    ) / s.str.len()
    return df.iloc[:, 1:]
#%%
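# Build tabular features: one-hot encode the categorical metadata columns and keep the numeric columns as-is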
df_publication_id = pd.get_dummies(df.publication_id)
df_publication_id.columns = [
    f"feat_publication_id_{c}" for c in df_publication_id.columns
]
df_gene_target_symbol_name = pd.get_dummies(df.gene_target_symbol_name)
df_gene_target_symbol_name.columns = [
    f"feat_gene_target_symbol_name_{c}" for c in df_gene_target_symbol_name.columns
]
df_gene_target_ncbi_id = pd.get_dummies(df.gene_target_ncbi_id)
df_gene_target_ncbi_id.columns = [
    f"feat_gene_target_ncbi_id_{c}" for c in df_gene_target_ncbi_id.columns
]
df_gene_target_species = pd.get_dummies(df.gene_target_species)
df_gene_target_species.columns = [
    f"feat_gene_target_species_{c}" for c in df_gene_target_species.columns
]
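# Extract the numeric part of siRNA_duplex_id and min-max scale it to [0, 1]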
siRNA_duplex_id_values = df.siRNA_duplex_id.str[3:-2].str.strip(".").astype("int")
siRNA_duplex_id_values = (siRNA_duplex_id_values - siRNA_duplex_id_values.min()) / (
    siRNA_duplex_id_values.max() - siRNA_duplex_id_values.min()
)
df_siRNA_duplex_id = pd.DataFrame(siRNA_duplex_id_values)
df_cell_line_donor = pd.get_dummies(df.cell_line_donor)
df_cell_line_donor.columns = [
    f"feat_cell_line_donor_{c}" for c in df_cell_line_donor.columns
]
df_cell_line_donor["feat_cell_line_donor_hepatocytes"] = (
    (df.cell_line_donor.str.contains("Hepatocytes")).fillna(False).astype("int")
)
df_cell_line_donor["feat_cell_line_donor_cells"] = (
    df.cell_line_donor.str.contains("Cells").fillna(False).astype("int")
)
df_siRNA_concentration = df.siRNA_concentration.to_frame()
df_Transfection_method = pd.get_dummies(df.Transfection_method)
df_Transfection_method.columns = [
    f"feat_Transfection_method_{c}" for c in df_Transfection_method.columns
]
df_Duration_after_transfection_h = pd.get_dummies(df.Duration_after_transfection_h)
df_Duration_after_transfection_h.columns = [
    f"feat_Duration_after_transfection_h_{c}"
    for c in df_Duration_after_transfection_h.columns
]
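# Concatenate all feature blocks column-wise; the last column of df is the label (mRNA_remaining_pct)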
feats = pd.concat(
    [
        df_publication_id,
        df_gene_target_symbol_name,
        df_gene_target_ncbi_id,
        df_gene_target_species,
        df_siRNA_duplex_id,
        df_cell_line_donor,
        df_siRNA_concentration,
        df_Transfection_method,
        df_Duration_after_transfection_h,
        siRNA_feat_builder(df.siRNA_sense_seq, False),
        siRNA_feat_builder(df.siRNA_antisense_seq, True),
        df.iloc[:, -1].to_frame(),
    ],
    axis=1,
)
#%%
import lightgbm as lgb
from sklearn.model_selection import train_test_split

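# Split only the first n_original rows (the labeled training data) into train/validation sets;
# the rows after n_original come from the sample submission file and have no labels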
X_train, X_test, y_train, y_test = train_test_split(
    feats.iloc[:n_original, :-1],
    feats.iloc[:n_original, -1],
    test_size=0.2,
    random_state=42,
)
#%%
train_data = lgb.Dataset(X_train, label=y_train)
test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)


# Callback that prints the validation-set metric after each boosting iteration
def print_validation_result(env):
    result = env.evaluation_result_list[-1]  # (dataset_name, metric_name, value, is_higher_better)
    print(f"[{env.iteration}] {result[0]}'s {result[1]}: {result[2]}")


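# LightGBM hyperparameters: gradient-boosted regression trees evaluated with RMSE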
params = {
    "boosting_type": "gbdt",
    "objective": "regression",
    "metric": "root_mean_squared_error",
    "max_depth": 7,
    "learning_rate": 0.02,
    "verbose": 0,
}
gbm = lgb.train(
    params,
    train_data,
    num_boost_round=15000,
    valid_sets=[test_data],
    callbacks=[print_validation_result],
)
#%%
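# Predict the remaining-mRNA percentage for the submission rows (the rows appended after the original training data)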
y_pred = gbm.predict(feats.iloc[n_original:, :-1])
#%%
df_submit["mRNA_remaining_pct"] = y_pred
df_submit.to_csv("submission.csv", index=False)

Solution Approach

  1. Read the data

    • Read the original training data and the sample submission file with pandas.
    • Concatenate the training data with the sample submission data.
  2. The feature-builder function siRNA_feat_builder

    • Extract features from each siRNA sequence: length, the nucleotides at the first and last positions, specific start/end pattern matches, and so on, following the information given on the official competition page: [link](http://competition.sais.com.cn/competitionDetail/532230/format). A toy example of this function is shown after this list.
  3. Feature extraction

    • Convert the categorical variables to one-hot encodings with pd.get_dummies.
    • Concatenate all features into a single DataFrame.
  4. Model training

    • Split the data into training and test sets with sklearn.model_selection.train_test_split.
    • Train a LightGBM model and evaluate its performance.
  5. Predict and save the results

    • Predict on the submission data and save the predictions to a CSV file.
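
To make the feature builder in step 2 concrete, the short sketch below runs siRNA_feat_builder on two made-up sequences (toy data, not from the competition set) and checks a couple of the resulting columns:

demo = pd.Series(["AAGCUUCGAUU", "UUCGAUGCAGA"])  # hypothetical 11-nt sequences, for illustration only
demo_feats = siRNA_feat_builder(demo, anti=False)
print(demo_feats["feat_siRNA_sense_seq_len"])        # 11 for both sequences
print(demo_feats["feat_siRNA_sense_seq_pattern_1"])  # True for the first (starts with AA, ends with UU), False for the second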

Questions Encountered

1. Why merge the training data and the sample submission data into one new DataFrame?

# import pandas as pd
df_original = pd.read_csv("E:/python program/pythonProject1/data/siRNA_0715/train_data.csv")
n_original = df_original.shape[0]
df_submit = pd.read_csv("E:/python program/pythonProject1/data/siRNA_0715/sample_submission.csv")
df = pd.concat([df_original, df_submit], axis=0).reset_index(drop=True)

Merging the training data (df_original) and the sample submission data (df_submit) into a single DataFrame (df) lets both datasets go through the same processing pipeline.

Example

Suppose a feature needs to be normalized. Without merging, the statistics would have to be computed separately for the training data and the submission data:

# process the two datasets separately
train_mean = df_original['feature'].mean()
train_std = df_original['feature'].std()
df_original['feature'] = (df_original['feature'] - train_mean) / train_std

submit_mean = df_submit['feature'].mean()
submit_std = df_submit['feature'].std()
df_submit['feature'] = (df_submit['feature'] - submit_mean) / submit_std

After merging, the statistics are computed and applied only once:

# process the merged data in one pass
combined_mean = df['feature'].mean()
combined_std = df['feature'].std()
df['feature'] = (df['feature'] - combined_mean) / combined_std

This not only simplifies the code but also keeps the feature definitions consistent across the two datasets.
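
The same argument applies to the one-hot features built with pd.get_dummies in this pipeline: encoding the two frames separately can yield different column sets whenever a category appears in only one of them, while encoding the merged frame guarantees that both halves share exactly the same columns. A minimal sketch with hypothetical gene names:

train_part = pd.DataFrame({"gene": ["TTR", "PCSK9"]})
submit_part = pd.DataFrame({"gene": ["TTR", "ANGPTL3"]})
print(pd.get_dummies(train_part.gene).columns.tolist())   # ['PCSK9', 'TTR']
print(pd.get_dummies(submit_part.gene).columns.tolist())  # ['ANGPTL3', 'TTR'] -- columns do not match
merged = pd.concat([train_part, submit_part], axis=0).reset_index(drop=True)
print(pd.get_dummies(merged.gene).columns.tolist())       # ['ANGPTL3', 'PCSK9', 'TTR'] -- shared by all rows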

2. Why is the one-hot encoding step needed?

Many machine-learning models and algorithms cannot handle categorical data directly because they only accept numeric inputs. One-hot encoding avoids imposing an artificial numeric ordering on the categories while still preserving the category information.
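
For instance, pd.get_dummies turns one categorical column into one indicator column per category, so no spurious ordering such as "species A < species B" is introduced (toy values below, not necessarily those in the dataset):

species = pd.Series(["Homo sapiens", "Mus musculus", "Homo sapiens"])
onehot = pd.get_dummies(species)
print(onehot.columns.tolist())  # ['Homo sapiens', 'Mus musculus']
print(onehot.sum().tolist())    # [2, 1]: each indicator column simply counts its category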
