A demo of optimizing a ranking model with RankNet

RankNet trains a scoring network from pairwise comparisons: the probability that one item should rank above another is modeled as a sigmoid of their score difference, and each pair is scored with a cross-entropy loss against the preference label.

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset

# Wraps feature/label tensors so a DataLoader can iterate over them
class RankNetDataset(Dataset):
    def __init__(self, features, labels):
        self.features = features
        self.labels = labels
        
    def __len__(self):
        return len(self.features)
    
    def __getitem__(self, idx):
        feature = self.features[idx]
        label = self.labels[idx]
        return feature, label

# A simple two-layer MLP that maps a feature vector to a single ranking score
class RankNetModel(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(RankNetModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, 1)
        
    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

def ranknet_loss(output1, output2, target):
    # Pairwise cross-entropy loss from RankNet:
    # P(item1 ranks above item2) = sigmoid(s1 - s2); target is 1 if item1
    # should rank above item2, 0 if below, and 0.5 for a tie.
    sigmoid = nn.Sigmoid()
    prob1 = sigmoid(output1 - output2)
    prob2 = 1 - prob1
    loss = -torch.mean(target * torch.log(prob1) + (1 - target) * torch.log(prob2))
    return loss
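
As a side note, the same pairwise loss can be written in a numerically safer form via torch.nn.functional.logsigmoid (equivalently, binary_cross_entropy_with_logits on the score difference), which avoids log(0) when the score gap is large. A minimal sketch, assuming the same calling convention as ranknet_loss above:

import torch.nn.functional as F

def ranknet_loss_stable(output1, output2, target):
    # 1 - sigmoid(x) == sigmoid(-x), so both terms can use the stable logsigmoid
    diff = output1 - output2
    return -torch.mean(target * F.logsigmoid(diff) + (1 - target) * F.logsigmoid(-diff))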

# Example input data
features = torch.tensor([[0.2, 0.1], [0.5, 0.3], [0.4, 0.2], [0.8, 0.7]])
labels = torch.tensor([1, 0, 1, 0])

# Hyperparameters
batch_size = 2
input_size = features.shape[1]
hidden_size = 16
learning_rate = 0.01
num_epochs = 10

# Create the dataset and dataloader; drop_last ensures every batch is a complete pair
dataset = RankNetDataset(features, labels)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)

# Initialize the model and optimizer
model = RankNetModel(input_size, hidden_size)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
model.train()

for epoch in range(num_epochs):
    total_loss = 0
    
    for feature, label in dataloader:
        optimizer.zero_grad()
        
        # Score both items of the pair, then build the pairwise target:
        # 1 if the first item should rank above the second, 0 if below, 0.5 for a tie
        output = model(feature)
        label = label.float()
        target = (torch.sign(label[0] - label[1]) + 1) / 2
        loss = ranknet_loss(output[0], output[1], target)
        
        loss.backward()
        optimizer.step()
        
        total_loss += loss.item()
    
    average_loss = total_loss / len(dataloader)
    print(f"Epoch {epoch+1}/{num_epochs}, RankNet Loss: {average_loss}")

# Evaluate the model on the test set
model.eval()

test_features = torch.tensor([[0.3, 0.4], [0.7, 0.5]])
test_labels = torch.tensor([1, 0])

test_dataset = RankNetDataset(test_features, test_labels)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

with torch.no_grad():
    for feature, label in test_dataloader:
        # RankNet scores are only meaningful relative to one another;
        # thresholding sigmoid(score) at 0.5 is just a rough sanity check here
        output = model(feature)
        predicted_labels = torch.round(torch.sigmoid(output)).flatten().tolist()

        print("Predicted Labels:")
        print(predicted_labels)
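
In practice the trained scorer is used to order a candidate list rather than to classify items one by one. A minimal sketch with a made-up candidate tensor:

# Rank hypothetical candidates by their learned scores (higher score ranks first)
candidates = torch.tensor([[0.3, 0.4], [0.7, 0.5], [0.1, 0.9]])
with torch.no_grad():
    scores = model(candidates).flatten()
ranking = torch.argsort(scores, descending=True)
print("Ranked candidate indices:", ranking.tolist())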
