Solving the Assignment Problem in Python with the Auction Algorithm

This post implements the Auction Algorithm in Python to solve the assignment problem.

1. Applicable Scenarios

  • Single (one-to-one) assignment problems
  • Complete assignment problems (every bidder ends up with an object)

2. Example

Suppose N bidders compete for N items, and each bidder's valuation of each item is given in the table below. The goal is an assignment that maximizes the total value.

              Item B1   Item B2   Item B3   Item B4
  Bidder A1      4         8        17        10
  Bidder A2     14        18        17         7
  Bidder A3      9         5        14        10
  Bidder A4     17        18        14         6
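As brief background (my paraphrase of Reference 1, not spelled out in the original post): the auction algorithm keeps a price p[j] for every item and a partial assignment that satisfies ε-complementary slackness, i.e. every assigned bidder i holding item j satisfies value[i, j] - p[j] >= max_k (value[i, k] - p[k]) - ε. In each round, every unassigned bidder bids on its most profitable item, raising that item's price just enough (plus ε) that the item remains its best choice; each item then goes to its highest bidder, displacing the previous holder, and the process repeats until every bidder holds an item.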

3. Code Implementation

(1) Bidding function

import copy

# Given the current prices, compute the next round of bids from all
# currently unassigned bidders.
def bidding(current_assignment,current_prices):
    new_bid={}
    for bidder in bidder_set:
        if bidder in current_assignment:
            continue
        # Unassigned bidders keep bidding.
        else:
            v_ij=[]
            available_object_list=copy.deepcopy(available_objects_for_bidders[bidder])
            for object in available_object_list:
                # Net value = valuation minus current price.
                v_ij.append(value_matrix[bidder,object]-current_prices[object])
            max_v=max(v_ij) # best net value
            bid_target=available_object_list[v_ij.index(max_v)] # most profitable object
            v_ij.remove(max_v)
            available_object_list.remove(bid_target)
            # Second-best net value; if the bidder has only one available
            # object, treat the second-best value as -infinity.
            sub_max_v=max(v_ij) if v_ij else float('-inf')
            bid_price=value_matrix[bidder,bid_target]-sub_max_v+epsilon # next bid
            if bid_target in new_bid:
                new_bid[bid_target][0].append(bidder)
                new_bid[bid_target][1].append(bid_price)
            else:
                new_bid[bid_target]=[[bidder],[bid_price]]
    return new_bid
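To make the bidding rule concrete, here is a hand-computed trace of the first round of the example above (my own illustration, assuming ε = 1 and all initial prices 0, as in the main function below). A1 and A2 already hold B3 and B2 in the initial assignment, so only A3 and A4 bid. A3's net values for B1 to B4 are 9, 5, 14 and 10; its most profitable item is B3 (14) and the second-best value is 10, so it bids 14 - 10 + 1 = 5 on B3. A4's net values are 17, 18, 14 and 6; its best item is B2 (18) with second-best value 17 (B1), so it bids 18 - 17 + 1 = 2 on B2.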

(2) Assignment function

# Given the new bids, update the prices and reassign the items that received bids.
def assigning(bid_prices,current_assignment,current_prices):
    new_prices={}
    for object in object_set:
        # Items that received bids are re-auctioned to the highest bidder.
        if object in bid_prices:
            bidders,prices=bid_prices[object]
            max_price=max(prices)
            bidder=bidders[prices.index(max_price)]
            new_prices[object]=max_price
            # The previous holder of this item, if any, loses it and
            # becomes unassigned again.
            for people,object_ in list(current_assignment.items()):
                if object_ == object:
                    current_assignment.pop(people)
                    break
            current_assignment[bidder]=object
        # Items without bids keep their current price.
        else:
            new_prices[object]=current_prices[object]
    return new_prices,current_assignment
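Continuing that trace through the assignment step: B3 receives a single bid of 5 from A3 and B2 a single bid of 2 from A4, so their prices rise to 5 and 2 respectively. The previous holders A1 and A2 are displaced and become unassigned, while A3 takes B3 and A4 takes B2. In the next round A1 and A2 re-bid at the new prices, and the process repeats until no unassigned bidders remain.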

(3) Main function

A fixed value of ε is used here; for better efficiency a varying ε (ε-scaling) can also be used, see Reference 1 at the end of the post (a minimal sketch of that idea is given after the main function below).

if __name__=='__main__':
    # valuation matrix
    value_matrix={
        ('A1', 'B1'): 4 , ('A1', 'B2'): 8 , ('A1', 'B3'): 17, ('A1', 'B4'): 10,
        ('A2', 'B1'): 14, ('A2', 'B2'): 18, ('A2', 'B3'): 17, ('A2', 'B4'): 7,
        ('A3', 'B1'): 9 , ('A3', 'B2'): 5 , ('A3', 'B3'): 14, ('A3', 'B4'): 10,
        ('A4', 'B1'): 17, ('A4', 'B2'): 18, ('A4', 'B3'): 14, ('A4', 'B4'): 6,
    }
    # set of bidders
    bidder_set=['A1','A2','A3','A4']
    # set of objects being auctioned
    object_set = ['B1', 'B2', 'B3', 'B4']
    # objects each bidder is allowed to bid on
    available_objects_for_bidders={
        'A1': ['B1', 'B2', 'B3', 'B4'],
        'A2': ['B1', 'B2', 'B3', 'B4'],
        'A3': ['B1', 'B2', 'B3', 'B4'],
        'A4': ['B1', 'B2', 'B3', 'B4'],
    }
    # initial assignment (must satisfy the ε-complementary-slackness condition)
    current_assignment = {'A1': 'B3', 'A2': 'B2'}
    # initial prices
    current_prices = {'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0}
    # value of ε
    epsilon=1.0
    # auction loop
    k=1
    while True:
        new_bid=bidding(current_assignment,current_prices)
        if len(new_bid)>0:
            new_price,new_assignment=assigning(new_bid,current_assignment,current_prices)
            current_assignment=new_assignment
            current_prices=new_price
        else:
            break
        print("Round {}:".format(k))
        print("\tCurrent assignment:")
        print("\t",current_assignment)
        print("\tCurrent prices:")
        print("\t",current_prices)
        k += 1

References

  1. Bertsekas, D.P. The auction algorithm: A distributed relaxation method for the assignment problem. Ann Oper Res 14, 105–123 (1988). https://doi.org/10.1007/BF02186476
  2. https://blog.csdn.net/weixin_47546390/article/details/108470396