[Datawhale AI Summer Camp 2024] Drug Efficacy Prediction (3)

LightGBM Implementation

LightGBM (Light Gradient Boosting Machine) is a framework that implements the GBDT (gradient-boosted decision tree) algorithm. It supports efficient parallel training and offers the following advantages:
faster training speed
lower memory consumption
better accuracy
distributed support for fast processing of massive datasets
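As a minimal, self-contained sketch of the lgb.train API that the rest of this post builds on (the synthetic data and parameter values here are illustrative only, not from the competition):

import lightgbm as lgb
import numpy as np

# illustrative synthetic regression data
X = np.random.rand(500, 10)
y = X[:, 0] * 10 + np.random.randn(500)

# LightGBM wraps features and labels in Dataset objects
train_data = lgb.Dataset(X[:400], label=y[:400])
val_data = lgb.Dataset(X[400:], label=y[400:], reference=train_data)

params = {"objective": "regression", "metric": "rmse", "learning_rate": 0.05}
gbm = lgb.train(params, train_data, num_boost_round=200, valid_sets=[val_data])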

1. Adding New Features

1.1 Refining the Task 2 Features

import pandas as pd

def siRNA_feat_builder3(s: pd.Series, anti: bool = False):
    name = "anti" if anti else "sense"
    df = s.to_frame()

    # length bucket: is the sequence exactly 21 nt long?
    df[f"feat_siRNA_{name}_len21"] = (s.str.len() == 21)
    # the ellipses mark further features constructed the same way (omitted)
    ...

    # overall GC content
    GC_frac = (s.str.count("G") + s.str.count("C")) / s.str.len()
    df[f"feat_siRNA_{name}_GC_in"] = (GC_frac >= 0.36) & (GC_frac <= 0.52)

    # local GC content (positions 2-7)
    GC_frac1 = (s.str[1:7].str.count("G") + s.str[1:7].str.count("C")) / s.str[1:7].str.len()
    ...

    df[f"feat_siRNA_{name}_GC_in1"] = GC_frac1
    ...

    # drop the original sequence column, keep only the new feature columns
    return df.iloc[:, 1:]
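A usage sketch, assuming df holds the competition columns siRNA_sense_seq and siRNA_antisense_seq (the same column names used later in this post):

# build sense- and antisense-strand features and join them side by side
sense_feats = siRNA_feat_builder3(df["siRNA_sense_seq"])
anti_feats = siRNA_feat_builder3(df["siRNA_antisense_seq"], anti=True)
feats = pd.concat([sense_feats, anti_feats], axis=1)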

1.2 Features from the Modified siRNA

def siRNA_feat_builder3_mod(s: pd.Series, anti: bool = False):
    name = "anti" if anti else "sense"
    df = s.to_frame()

    # category of the modification unit at the first and last positions
    for pos in [0, -1]:
        for c in voc_ls:
            ...
    # and at the second and second-to-last positions
    for pos in [1, -2]:
        for c in voc_ls:
            ...

    return df.iloc[:, 1:]
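One plausible filling of the elided loop bodies (hypothetical, not the author's exact code), assuming voc_ls is the list of modification-unit tokens and each element of s is a list of such tokens:

# hypothetical sketch: one-hot indicators for the unit category at boundary positions
def siRNA_feat_builder3_mod_sketch(s: pd.Series, voc_ls, anti: bool = False):
    name = "anti" if anti else "sense"
    df = s.to_frame()
    for pos in [0, -1, 1, -2]:
        for c in voc_ls:
            # indicator: does the modification unit at this position equal token c?
            df[f"feat_siRNA_{name}_pos{pos}_{c}"] = (s.str[pos] == c).astype(int)
    return df.iloc[:, 1:]

Next, the sequences are tokenized into n-grams and a vocabulary is built over the resulting tokens: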
class GenomicTokenizer:
    def __init__(self, ngram=5, stride=2):
        # initialize the tokenizer with the n-gram length and stride
        self.ngram = ngram
        self.stride = stride

    def tokenize(self, t):
        # convert a string into a list of characters
        if isinstance(t, str):
            t = list(t)

        if self.ngram == 1:
            # with n-gram length 1, the token list is just the character list
            toks = t
        else:
            # otherwise slide an n-gram window over the sequence with the given stride;
            # the length check inside the comprehension already drops short tail windows
            toks = [t[i:i+self.ngram] for i in range(0, len(t), self.stride) if len(t[i:i+self.ngram]) == self.ngram]

            # guard against an empty token list (sequence shorter than one n-gram)
            if toks and len(toks[-1]) < self.ngram:
                toks = toks[:-1]

            # join each n-gram (a list of characters) back into a string
            toks = [''.join(x) for x in toks]

        # return the token list
        return toks
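For example:

tok = GenomicTokenizer(ngram=3, stride=1)
print(tok.tokenize("AGCUU"))  # ['AGC', 'GCU', 'CUU']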

from collections import Counter

class GenomicVocab:
    def __init__(self, itos):
        # itos is the index-to-token list
        self.itos = itos
        # build the reverse token-to-index mapping
        self.stoi = {v: k for k, v in enumerate(self.itos)}

    @classmethod
    def create(cls, tokens, max_vocab, min_freq):
        # class method that builds a vocabulary from a list of tokens
        # count the frequency of each token
        freq = Counter(tokens)
        # keep tokens with frequency >= min_freq, up to max_vocab tokens in total
        # itos = ['<pad>'] + [o for o, c in freq.most_common(max_vocab - 1) if c >= min_freq]
        itos = [o for o, c in freq.most_common(max_vocab - 1) if c >= min_freq]
        # return a vocabulary instance
        return cls(itos)
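Continuing the tokenizer example above:

tokens = tok.tokenize("AGCUUAGC")
vocab = GenomicVocab.create(tokens, max_vocab=100, min_freq=1)
print(vocab.itos)   # tokens ordered by frequency
print(vocab.stoi)   # token -> index mapping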
    
import re

def siRNA_feat_builder_substr(se, name, patterns):

    # dictionary collecting one count column per pattern
    features = {}

    for pattern in patterns:
        try:
            # escaped_pattern = re.escape(pattern)  # escape regex metacharacters in the pattern
            escaped_pattern = pattern
            features[f"feat_{name}_seq_pattern_{escaped_pattern}"] = se.str.count(escaped_pattern)
        except re.error as e:
            print(f"Error in pattern {pattern}: {e}")

    # convert the dictionary into a DataFrame
    feature_df = pd.DataFrame(features)

    return feature_df
# build the sequence features
seq_features_df = pd.DataFrame()

tokenizer1 = GenomicTokenizer(ngram=1, stride=1)  # 1-gram
tokenizer2 = GenomicTokenizer(ngram=2, stride=1)  # 2-gram
tokenizer3 = GenomicTokenizer(ngram=3, stride=1)  # 3-gram

# substring frequency counts for the unmodified sequences
cols_nomod = ["siRNA_sense_seq", "siRNA_antisense_seq"]
all_tokens_nomod = []
for col in cols_nomod:
    for seq in df[col]:
        if pd.isna(seq):
            continue
        ...
print('#all_tokens_nomod: ', len(all_tokens_nomod))

vocab_nomod = GenomicVocab.create(all_tokens_nomod, max_vocab=100000, min_freq=1)
print('#vocab_nomod: ', len(vocab_nomod.itos))

for col in cols_nomod:
    ...

# substring frequency counts for the modified sequences
cols_mod = ["modified_siRNA_sense_seq", "modified_siRNA_antisense_seq"]
cols_mod_ls = ["modified_siRNA_sense_seq_list", "modified_siRNA_antisense_seq_list"]
all_tokens_mod = []
for col in cols_mod_ls:
    for seq_ls in df[col]:
        if pd.isna(seq_ls):
            continue
        ...
print('#all_tokens_mod: ', len(all_tokens_mod))

vocab_mod = GenomicVocab.create(all_tokens_mod, max_vocab=100000, min_freq=1)
print('#vocab_mod: ', len(vocab_mod.itos))

for col in cols_mod:
    ...
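The elided loops are left as an exercise; a plausible sketch of the unmodified-sequence branch (hypothetical, reusing the variable names above) first collects tokens with all three tokenizers, then counts each vocabulary token per sequence:

# hypothetical filling of the elided steps for the unmodified sequences
for col in cols_nomod:
    for seq in df[col]:
        if pd.isna(seq):
            continue
        for tokenizer in [tokenizer1, tokenizer2, tokenizer3]:
            all_tokens_nomod.extend(tokenizer.tokenize(seq))

# ...then, after building vocab_nomod, count every vocabulary token in each sequence
for col in cols_nomod:
    feat = siRNA_feat_builder_substr(df[col], col, vocab_nomod.itos)
    seq_features_df = pd.concat([seq_features_df, feat], axis=1)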

1.3 Comparing the siRNA Sequence with the Target Sequence

def get_feat_align(df, anti: bool = False):
    # hint: https://biopython.org/docs/1.76/api/Bio.pairwise2.html
    # use pairwise2.align.localxx
    ...
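A minimal sketch of the hinted API (the target-sequence column name gene_target_seq and the choice of score_only are my assumptions, not the author's code):

from Bio import pairwise2

def get_feat_align_sketch(df, anti: bool = False):
    col = "siRNA_antisense_seq" if anti else "siRNA_sense_seq"
    # localxx: local alignment, match = 1, no mismatch or gap penalties;
    # score_only=True returns just the best alignment score
    return df.apply(
        lambda row: pairwise2.align.localxx(
            row[col], row["gene_target_seq"], score_only=True
        ),
        axis=1,
    )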

2. LightGBM Optimization

2.1 Higher Weights for Samples in the Low-Remaining Range

Samples whose mRNA_remaining_pct lies in [0, 30] (the range the official metric emphasizes, see section 2.2) receive twice the training weight:

weight_ls = np.array(feats['mRNA_remaining_pct'].apply(lambda x: 2 if 0 <= x <= 30 else 1))

2.2 Using the Official Evaluation Score as the Validation Metric

The original root_mean_squared_error validation metric is replaced by the more complex official evaluation score:

\text{score} = 50\% \times \left(1 - \frac{\text{MAE}}{100}\right) + 50\% \times F_1 \times \left(1 - \frac{\text{Range-MAE}}{100}\right)

where MAE is the mean absolute error over all samples, Range-MAE is the MAE over the samples predicted to fall in the low-expression range [0, 30], and F1 is the F1 score of the binary decision "remaining percentage in [0, 30]".
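For instance, with MAE = 20, F1 = 0.8, and Range-MAE = 10, the score is 0.5 × (1 − 0.2) + 0.5 × 0.8 × (1 − 0.1) = 0.4 + 0.36 = 0.76.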

import numpy as np
from sklearn.metrics import mean_absolute_error

# calculate_metrics computes the official evaluation score as a custom LightGBM metric
def calculate_metrics(preds, data, threshold=30):
    y_pred = preds
    y_true = data.get_label()
    mae = np.mean(np.abs(y_true - y_pred))
    # if mae < 0: mae = 0
    # elif mae > 100: mae = 100

    # binary labels/predictions: is the remaining percentage within [0, threshold]?
    y_true_binary = ((y_true <= threshold) & (y_true >= 0)).astype(int)
    y_pred_binary = ((y_pred <= threshold) & (y_pred >= 0)).astype(int)

    # Range-MAE: MAE restricted to samples predicted inside [0, threshold];
    # defaults to the worst value (100) when nothing is predicted in range
    mask = (y_pred >= 0) & (y_pred <= threshold)
    range_mae = (
        mean_absolute_error(y_true[mask], y_pred[mask]) if np.sum(mask) > 0 else 100
    )
    # if range_mae < 0: range_mae = 0
    # elif range_mae > 100: range_mae = 100

    # precision = precision_score(y_true_binary, y_pred_binary, average="binary")
    # recall = recall_score(y_true_binary, y_pred_binary, average="binary")

    # manual precision/recall, guarding against empty classes
    if np.sum(y_pred_binary) > 0:
        precision = (y_pred_binary & y_true_binary).sum() / np.sum(y_pred_binary)
    else:
        precision = 0
    if np.sum(y_true_binary) > 0:
        recall = (y_pred_binary & y_true_binary).sum() / np.sum(y_true_binary)
    else:
        recall = 0

    if precision + recall == 0:
        f1 = 0
    else:
        f1 = 2 * precision * recall / (precision + recall)
    score = (1 - mae / 100) * 0.5 + (1 - range_mae / 100) * f1 * 0.5
    return "custom_score", score, True  # True means higher is better

2.3 Adaptive Learning Rate

# adaptive_learning_rate builds a LightGBM callback that decays the learning rate
# once `patience` rounds have passed without a new best validation score
def adaptive_learning_rate(decay_rate=0.8, patience=50):
    best_score = float("-inf")  # higher is better, so start at negative infinity
    wait = 0

    def callback(env):
        nonlocal best_score, wait
        current_score = env.evaluation_result_list[-1][2]  # assumes the last metric is the one to track
        current_lr = env.model.params.get('learning_rate')

        if current_score > best_score:
            best_score = current_score
            # wait = 0  # deliberately not reset: we count rounds without a new best, not consecutive declines
        else:
            wait += 1

        if wait >= patience:
            new_lr = float(current_lr) * decay_rate
            wait = 0
            # reset_parameter pushes the new rate into the booster; merely assigning
            # to env.model.params would not affect subsequent boosting rounds
            env.model.reset_parameter({'learning_rate': new_lr})
            env.model.params['learning_rate'] = new_lr
            print(f"Learning rate adjusted to {new_lr}")

    return callback
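For scheduled rather than adaptive decay, LightGBM also ships a built-in lgb.reset_parameter callback, e.g.:

# decay the learning rate geometrically with the iteration number
lgb_lr = lgb.reset_parameter(learning_rate=lambda it: 0.05 * (0.99 ** it))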

2.4 K-Fold Cross-Training

import lightgbm as lgb
from sklearn.model_selection import KFold

# train runs k-fold cross-validation and returns one model per fold
def train(feats, n_original):
    # define the k-fold split
    n_splits = 10
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
    # run the k-fold cross-validation
    gbms = []
    for fold, (train_idx, val_idx) in enumerate(
        kf.split(feats.iloc[:n_original, :]), 1
    ):
        # build the training and validation sets (the last column is the label)
        X_train, X_val = feats.iloc[train_idx, :-1], feats.iloc[val_idx, :-1]
        y_train, y_val = feats.iloc[train_idx, -1], feats.iloc[val_idx, -1]
        w_train = weight_ls[train_idx]

        # create the LightGBM datasets
        train_data = lgb.Dataset(X_train, label=y_train, weight=w_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

        boost_round = 25000
        early_stop_rounds = int(boost_round * 0.1)

        # periodic metric logging and early stopping on the first metric
        lgb_log = lgb.log_evaluation(period=200, show_stdv=True)
        lgb_stop = lgb.early_stopping(stopping_rounds=early_stop_rounds, first_metric_only=True, verbose=True, min_delta=0.00001)

        # LightGBM parameters
        params = {
            "boosting_type": "gbdt",
            "objective": "regression",
            "metric": "None",  # rely solely on the custom feval metric
            # "metric": "root_mean_squared_error",
            "max_depth": 8,
            "num_leaves": 63,
            "min_data_in_leaf": 2,
            "learning_rate": 0.05,
            "feature_fraction": 0.9,
            "lambda_l1": 0.1,
            "lambda_l2": 0.2,
            "verbose": -1,  # -1 silences LightGBM's own output
            "early_stopping_round": early_stop_rounds,
            "num_threads": 8,
        }

        # attach the adaptive learning-rate callback during training
        adaptive_lr = adaptive_learning_rate(decay_rate=0.9, patience=1000)
        gbm = lgb.train(
            params,
            train_data,
            num_boost_round=boost_round,
            valid_sets=[val_data],
            feval=calculate_metrics,  # pass the custom metric as feval
            # callbacks=[print_validation_result, adaptive_lr, lgb_log, lgb_stop],
            callbacks=[adaptive_lr, lgb_log, lgb_stop],
        )
        valid_score = gbm.best_score["valid_0"]["custom_score"]
        print(f"best_valid_score: {valid_score}")
        gbms.append(gbm)

    return gbms
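At inference time, a common follow-up (a sketch; that the test rows sit after the first n_original rows of feats is my assumption) is to average the fold models' predictions:

gbms = train(feats, n_original)
# assumed layout: rows after n_original are the test set, last column is the label slot
X_test = feats.iloc[n_original:, :-1]
preds = np.mean([gbm.predict(X_test) for gbm in gbms], axis=0)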
