Computational Biology — Generating and Saving Embeddings in the Background with VS Code to Speed Up Model Loading (09.23)

Continuing from the previous post: Computational Biology — PyTorch Code — Protein Target / Small-Molecule Docking Affinity Prediction on CPU (07.11–07.22), CSDN blog

Last time I only used 300 samples (out of 52,000 in total). My advisor suggested generating the embedding vectors in a background job and saving them to files, then loading those embeddings directly when training the model. That saves a lot of time — otherwise every run has to convert the sequences into embeddings first...

(The end of the previous post.)
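In short, the whole workflow boils down to the pattern below — a minimal sketch only, with placeholder names (encode() stands in for the real ChemBERTa / ESM2 encoding done in save.py later in this post):

import numpy as np

def encode(seq):                       # stand-in for a real encoder
    return np.zeros(8, dtype=np.float32)

sequences = ["CCO", "MKTAYIAK"]        # toy SMILES / protein strings

# Run once, in the background: compute the embeddings and save them to disk.
embeddings = np.stack([encode(s) for s in sequences])
np.save('embeddings.npy', embeddings)

# Every training run afterwards: just load the saved arrays instead of re-encoding.
embeddings = np.load('embeddings.npy')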

I. Running the job in the background with tmux

1. Start a new tmux session

Before that, activate the conda environment you've set up — for me it's still bindingdb: conda activate bindingdb

Then run the following command:

tmux new -s bindingdb_session

2. Run the script inside the session

You can hover over the file you want to run, right-click it, and choose Copy Path (to make sure the path is correct).

Don't just type python save.py!!! Use the full path:

python /home/embark/rain/BindingDB/save.py

3. Press Ctrl + B, then D, to detach the session and leave it running in the background.

4. Reattach to the session:

tmux attach -t bindingdb_session

5. After checking the progress, press Ctrl + B then D again to detach and keep the job running in the background.

After a night's sleep, the embeddings for the training and test sets had been saved ~

II. About save.py

print("开始运行脚本", flush=True)

import torch
print(torch.__version__, flush=True)
print(torch.cuda.is_available(), flush=True)
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.model_selection import train_test_split

import torch.nn as nn
import torch.nn.functional as F

import pandas as pd
from transformers import BertTokenizer, AutoTokenizer, BertModel, AutoModel,RobertaTokenizer,RobertaModel
import numpy as np

# ESM2 model setup #
local_model_path = r'/home/embark/local_model'
# Load the local tokenizer and model
esm2_tokenizer = AutoTokenizer.from_pretrained(local_model_path)

pytorch_path=r'/home/embark/local_model/pytorch_model.bin'
esm2_model = AutoModel.from_pretrained(local_model_path)

# ChemBERTa model #
local_model_path2 = r'/home/embark/rain/wenxue/chemBERTa/chemBERTa_files'
# Load the local tokenizer
chemBERTa_tokenizer = RobertaTokenizer.from_pretrained(local_model_path2)
# Load the local model
chemBERTa_model = RobertaModel.from_pretrained(local_model_path2)

import os
# Print the current working directory
current_dir = os.getcwd()
print(current_dir, flush=True)
csv_file = r'/home/embark/rain/wenxue/BindingDB/BindingDB_all.csv'
# Confirm the path
#print(f"CSV file path: {csv_file}", flush=True)
# Read the CSV file
dataa = pd.read_csv(csv_file)
# Randomly sample n rows (the dataset has 52,000 rows in total, so this effectively just shuffles it)
data_sampled = dataa.sample(n=52000, random_state=40)
data = data_sampled.dropna(subset=['Drug', 'Target', 'Y'])  # drop rows with missing values in these three columns
print(data.head(), flush=True)

# Min-max normalize Y
data['Y'] = (data['Y'] - data['Y'].min()) / (data['Y'].max() - data['Y'].min())
#print(data_sampled.head())

# Split the data into training, validation and test sets
train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)
train_data, val_data = train_test_split(train_data, test_size=0.25, random_state=42)  # 0.25 x 0.8 = 0.2
# Make sure train_data, val_data and test_data are DataFrames
print(type(train_data))  # should print <class 'pandas.core.frame.DataFrame'>
print(type(val_data))    # should print <class 'pandas.core.frame.DataFrame'>
print(type(test_data))   # should print <class 'pandas.core.frame.DataFrame'>
print("train_data:", train_data)

# Length of each sequence in the Drug column
data['Drug_length'] = data['Drug'].apply(len)
# Length of each sequence in the Target column
data['Target_length'] = data['Target'].apply(len)
# Maximum sequence length in the Drug column
max_drug_length = data['Drug_length'].max()
# Maximum sequence length in the Target column
max_target_length = data['Target_length'].max()
# (These statistics are informational only — process_chunks below tokenizes with max_length=128.)
print(f"Max sequence length in the Drug column: {max_drug_length}")
print(f"Max sequence length in the Target column: {max_target_length}")

class CustomDataset(Dataset):
    def __init__(self, data):
        self.data = data
    
    def __len__(self):
        return len(self.data)
    
    def __getitem__(self, idx):
        return {
            'Drug': self.data.iloc[idx]['Drug'],
            'Target': self.data.iloc[idx]['Target'],
            'Y': self.data.iloc[idx]['Y']
        }

# Function for handling long sequences
def process_chunks(model, tokenizer, seq, max_length=128):
    # Tokenize the sequence.  Note: with truncation=True and max_length set, the tokenizer
    # already caps the output at max_length tokens, so in practice there is a single chunk;
    # the loop below handles the general case of multiple chunks.
    tokens = tokenizer(seq, return_tensors="pt", padding='max_length', truncation=True, max_length=max_length)['input_ids'].squeeze()
    chunks = [tokens[i:i + max_length] for i in range(0, len(tokens), max_length)]
    
    embeddings = []
    with torch.no_grad():
        for chunk in chunks:
            inputs = {'input_ids': chunk.unsqueeze(0)}
            outputs = model(**inputs)
            # Mean-pool the last hidden state to get one vector per chunk
            chunk_embedding = outputs.last_hidden_state.mean(dim=1).squeeze()
            embeddings.append(chunk_embedding)
    
    # Average the chunk embeddings into a single vector
    final_embedding = torch.stack(embeddings).mean(dim=0)
    return final_embedding

# Define smiles_to_vector and sequence_to_embedding
def smiles_to_vector(seq):
    return process_chunks(chemBERTa_model, chemBERTa_tokenizer, seq, max_length=128)

def sequence_to_embedding(seq):
    return process_chunks(esm2_model, esm2_tokenizer, seq, max_length=128)
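
# (Optional sanity check, not part of the original script: embed one toy SMILES string and one
#  toy protein sequence and print the vector sizes.  Judging from input_dim = 384 + 1280 used
#  in the training script later, the drug vector should be 384-dim and the target vector 1280-dim.)
# print(smiles_to_vector("CCO").shape)
# print(sequence_to_embedding("MKTAYIAK").shape)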

# Generate embedding vectors in batches
def generate_embeddings_in_batches3(data, smiles_to_vector, sequence_to_embedding, device, batch_size=100):
    dataset = CustomDataset(data)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    
    drug_embeddings = []
    target_embeddings = []
    labels = []
    
    batch_count = 0  # batch counter
    
    for batch in dataloader:
        batch_count += 1  # increment for each new batch
        
        drug_batch = []
        target_batch = []
        label_batch = []
        
        # Progress / debug output: which batch is being processed
        print(f"Processing batch {batch_count}/{len(dataloader)}...")
        
        for i in range(len(batch['Drug'])):
            smiles = batch['Drug'][i]
            sequence = batch['Target'][i]
            label = batch['Y'][i].clone().detach().to(device)
            
            # Compute the embeddings and move them to the target device
            drug_vector = smiles_to_vector(smiles).to(device)
            sequence_vector = sequence_to_embedding(sequence).to(device)
            
            drug_batch.append(drug_vector)
            target_batch.append(sequence_vector)
            label_batch.append(label)
        
        drug_embeddings.extend(drug_batch)
        target_embeddings.extend(target_batch)
        labels.extend(label_batch)
        
        # Free cached GPU memory promptly
        torch.cuda.empty_cache()
    
    # Move the embeddings from GPU back to CPU and convert them to NumPy arrays
    drug_embeddings = torch.stack(drug_embeddings).cpu().numpy()
    target_embeddings = torch.stack(target_embeddings).cpu().numpy()
    labels = torch.stack(labels).cpu().numpy()
    
    # Print the shapes of the resulting arrays
    print("Drug embeddings shape:", drug_embeddings.shape)
    print("Target embeddings shape:", target_embeddings.shape)
    print("Labels shape:", labels.shape)
    
    return drug_embeddings, target_embeddings, labels

# Use the GPU if available, otherwise fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Generate the embeddings (in batches) and save them
train_drug_embeddings, train_target_embeddings, train_labels = generate_embeddings_in_batches3(train_data, smiles_to_vector, sequence_to_embedding, device)
# Save the training-set embeddings
np.save('/home/embark/rain/BindingDB/train_drug_embeddings.npy', train_drug_embeddings)
np.save('/home/embark/rain/BindingDB/train_target_embeddings.npy', train_target_embeddings)
np.save('/home/embark/rain/BindingDB/train_labels.npy', train_labels)
print("Training-set embeddings saved.")
test_drug_embeddings, test_target_embeddings, test_labels = generate_embeddings_in_batches3(test_data, smiles_to_vector, sequence_to_embedding, device)
# Save the test-set embeddings
np.save('/home/embark/rain/BindingDB/test_drug_embeddings.npy', test_drug_embeddings)
np.save('/home/embark/rain/BindingDB/test_target_embeddings.npy', test_target_embeddings)
np.save('/home/embark/rain/BindingDB/test_labels.npy', test_labels)
print("Test-set embeddings saved.")

The script is mostly the same as in the previous post; the key addition is the np.save calls at the end, which write the embeddings (and labels) to .npy files on disk.
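
Once the background job finishes, it's worth a quick check that the files are intact before training — a minimal sketch, using the same paths as above:

import numpy as np

# Load the saved arrays and confirm that the sample counts line up
base = '/home/embark/rain/BindingDB'
for split in ('train', 'test'):
    drug = np.load(f'{base}/{split}_drug_embeddings.npy')
    target = np.load(f'{base}/{split}_target_embeddings.npy')
    labels = np.load(f'{base}/{split}_labels.npy')
    print(split, drug.shape, target.shape, labels.shape)
    assert len(drug) == len(target) == len(labels)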

III. Training and evaluating the model

1. Load the embeddings

There's nothing technical about this step — just make sure your file paths are correct ~

import torch
print(torch.__version__)
print(torch.cuda.is_available())
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.model_selection import train_test_split
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from transformers import BertTokenizer, AutoTokenizer, BertModel, AutoModel,RobertaTokenizer,RobertaModel
import numpy as np

# Load the saved embeddings
train_drug_embeddings = np.load('train_drug_embeddings.npy')
train_target_embeddings = np.load('train_target_embeddings.npy')
train_labels = np.load('train_labels.npy')

test_drug_embeddings = np.load('test_drug_embeddings.npy')
test_target_embeddings = np.load('test_target_embeddings.npy')
test_labels = np.load('test_labels.npy')

print("Embeddings loaded.")

2. Create the dataset and data loaders
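
The EmbeddingDataset class used below isn't shown in the post. As a minimal sketch of what it needs to do — wrap the three NumPy arrays, cast them to float32 tensors, and return one (drug, target, label) triple per index — it could look like this (the actual class may differ):

class EmbeddingDataset(Dataset):
    def __init__(self, drug_embeddings, target_embeddings, labels):
        # Convert the precomputed NumPy arrays to float32 tensors once, up front
        self.drug = torch.tensor(drug_embeddings, dtype=torch.float32)
        self.target = torch.tensor(target_embeddings, dtype=torch.float32)
        self.labels = torch.tensor(labels, dtype=torch.float32)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # One (drug embedding, target embedding, label) triple per sample
        return self.drug[idx], self.target[idx], self.labels[idx]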

# Create the datasets
train_dataset = EmbeddingDataset(train_drug_embeddings, train_target_embeddings, train_labels)
test_dataset = EmbeddingDataset(test_drug_embeddings, test_target_embeddings, test_labels)

# Create the data loaders
batch_size = 32
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

print("Data loaders created.")

3. Define the model

#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
# Define a simple MLP model
class MLP(nn.Module):
    def __init__(self, input_dim):
        super(MLP, self).__init__()
        self.linear1 = nn.Linear(input_dim, 512)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)
        self.linear2 = nn.Linear(512, 256)
        self.linear3 = nn.Linear(256, 1)

    def forward(self, drug, protein):
        x = torch.cat((drug, protein), dim=1)
        out = self.linear1(x)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.linear2(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.linear3(out)
        return out

# Input dimension: drug embedding (384, ChemBERTa) + target embedding (1280, ESM2)
input_dim = 384 + 1280
model = MLP(input_dim).to(device)

criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

print(model)

(If the final results don't meet expectations, you can also try improving this model ~)
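
Purely as an illustration of one possible direction (not what this post actually uses): a slightly deeper head with batch normalization, keeping the same (drug, protein) interface:

class MLPv2(nn.Module):
    # A hypothetical variant of the MLP above: adds BatchNorm and one more hidden layer
    def __init__(self, input_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, 1024), nn.BatchNorm1d(1024), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(1024, 512), nn.BatchNorm1d(512), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(512, 256), nn.ReLU(),
            nn.Linear(256, 1),
        )

    def forward(self, drug, protein):
        return self.net(torch.cat((drug, protein), dim=1))

Swapping it in would just mean model = MLPv2(input_dim).to(device); the training and evaluation code stays the same.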

4. Train the model

# Training function
def train_model_every(model, train_loader, criterion, optimizer, num_epochs,print_every):
    model.train()
    for epoch in range(num_epochs):
        running_loss = 0.0
        for i, (drug, protein, label) in enumerate(train_loader):
            optimizer.zero_grad()  # reset the gradients
            outputs = model(drug, protein)
            loss = criterion(outputs.squeeze(), label)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if (i + 1) % print_every == 0:
                print(f"Epoch [{epoch+1}/{num_epochs}], Batch [{i+1}/{len(train_loader)}], Loss: {loss.item():.4f}")
        
        epoch_loss = running_loss / len(train_loader)
        print(f"Epoch [{epoch+1}/{num_epochs}] Average Loss: {epoch_loss:.4f}")


# Training hyperparameters
num_epochs = 100
print_every = 10

train_model_every(model, train_loader, criterion, optimizer, num_epochs, print_every)

The model was trained for 100 epochs in total.

5. Evaluate the model

def concordance_index(y_true, y_pred):
    """
    Compute the concordance index (CI) by comparing every pair of samples
    (O(n^2), so it can be slow on large test sets).
    """
    pairs = 0
    concordant_pairs = 0
    n = len(y_true)
    
    for i in range(n):
        for j in range(i + 1, n):
            if y_true[i] != y_true[j]:
                pairs += 1
                if (y_pred[i] > y_pred[j] and y_true[i] > y_true[j]) or (y_pred[i] < y_pred[j] and y_true[i] < y_true[j]):
                    concordant_pairs += 1
                elif y_pred[i] == y_pred[j]:
                    concordant_pairs += 0.5
    
    return concordant_pairs / pairs
from sklearn.metrics import r2_score, mean_squared_error
from scipy.stats import pearsonr

def evaluate_model(model, test_loader, criterion):
    model.eval()
    mse, r2, p_val, CI = 0, 0, 0, 0  # initialize the evaluation metrics
    all_labels = []
    all_outputs = []
    
    print("Starting evaluation...")

    with torch.no_grad():
        for batch_idx, (drug, protein, label) in enumerate(test_loader):
            print(f"Processing batch {batch_idx+1}/{len(test_loader)}")
            outputs = model(drug, protein)
            loss = criterion(outputs.squeeze(), label)
            mse += loss.item()
            
            all_labels.append(label.cpu().numpy())
            all_outputs.append(outputs.squeeze().cpu().numpy())

    print("Calculating final metrics...") 
    
    mse /= len(test_loader)
    all_labels = np.concatenate(all_labels)
    all_outputs = np.concatenate(all_outputs)
    
    r2 = r2_score(all_labels, all_outputs)
    p_val = pearsonr(all_labels, all_outputs)[0]
    CI = concordance_index(all_labels, all_outputs)
    
    print(f"Mean Squared Error: {mse:.4f}")
    print(f"R^2: {r2:.4f}")
    print(f"Pearson Correlation: {p_val:.4f}")
    print(f"Concordance Index: {CI:.4f}")
    
    return mse, r2, p_val, CI

# Evaluate the model
mse, r2, p_val, CI = evaluate_model(model, test_loader, criterion)
print(f"R^2: {r2}, Pearson Correlation: {p_val}, Concordance Index: {CI}")

Comparison with the previous post:

Coefficient of determination R²: 0.4217 (previously -0.0696) — a big improvement!

Pearson correlation: 0.65113 (previously 0.1620) — there is now a moderate positive correlation between the predicted and actual values.

Concordance index (CI): 0.5176 (previously 0.4551).
