"""
带宽分配系统与自动节点调整功能
本模块实现了一个带宽分配系统,基于客户的带宽使用情况进行分配。
系统会自动调整节点的数量和容量(每个节点最大不超过400G),以满足业务需求并优化复用率。
日期:2024-10-31
"""
import datetime
import csv
import json
import logging
import time
from decimal import Decimal
from concurrent.futures import ProcessPoolExecutor, as_completed
from enum import Enum
import pandas as pd
import copy
import numpy as np
import os
import argparse
HEAP_REDUNDANT_TIME = 0
STEP_SIZE = 10.0
MAX_ADJUSTMENTS = 1000
MAX_NODE_CAPACITY = 400.0
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler(),
logging.FileHandler("bandwidth_allocation.log")
]
)
def debug_time(func):
    """Decorator that logs a function's execution time."""
    def wrapper(*args, **kwargs):
        now = time.time()
        logging.debug('=======before ' + func.__name__ + ' ' + str(now) + '======')
        result = func(*args, **kwargs)
        after = time.time()
        logging.debug('=======after ' + func.__name__ + ' ' + str(after - now) + '======')
        return result
    return wrapper
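# Illustrative usage of debug_time (a minimal sketch, not part of the production
# flow; the function below is hypothetical and never called by this module).
@debug_time
def _example_timed_step():
    """Sleep briefly so the DEBUG before/after timing lines become visible."""
    time.sleep(0.1)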
class BillingType(Enum):
"""计费方式枚举"""
MONTHLY_95 = '月95'
PACKAGE_PORT = '包端口'
AVERAGE_TRAFFIC = '平均流量'
OTHER = '其他'
class NodeCapacity:
"""
表示网络中的一个节点及其带宽容量管理。
"""
    def __init__(self, node_id, node_name, operator, capacity, plan_line, bill_rate=1.0,
                 total_time_slots=8640, billing_type=BillingType.OTHER, node_province='',
                 scheduling_upper_limit_percentage=1.0):
        """
        Initialize a node object.
        :param node_id: unique node identifier
        :param node_name: node name
        :param operator: operator (carrier) name
        :param capacity: maximum node capacity (Gbps)
        :param plan_line: initial plan line of the node (Gbps)
        :param bill_rate: node price coefficient
        :param total_time_slots: total number of time slots (one slot every 5 minutes)
        :param billing_type: billing method, a BillingType enum member
        :param node_province: province where the node is located
        :param scheduling_upper_limit_percentage: scheduling upper-limit percentage, default 1.0 (100%)
        """
self.node_id = node_id
self.node_name = node_name
self.operator = operator
self.capacity = capacity
self.plan_line = plan_line
self.bill_rate = bill_rate
self.billing_type = billing_type
self.node_province = node_province
self.total_time_slots = total_time_slots
self.free_minutes = self.calculate_free_minutes(self.total_time_slots)
self.assigned_bandwidths = {}
self.scheduling_upper_limit_percentage = scheduling_upper_limit_percentage
self.is_free_95_node = (self.billing_type == BillingType.MONTHLY_95 and self.bill_rate == 0)
def calculate_free_minutes(self, total_time_slots):
"""
根据总时间槽数和免计费比例,计算节点的免费超规划线时间。
:param total_time_slots: 总时间槽数
:return: 免费超规划线时间(分钟)
"""
if self.billing_type == BillingType.PACKAGE_PORT:
return 0
else:
free_slots = int(total_time_slots * 0.05)
free_minutes = free_slots * 5
return free_minutes
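    # Worked example (a sketch with the default numbers): for total_time_slots of
    # 8640 (30 days x 288 five-minute slots per day), a monthly-95 node gets
    # int(8640 * 0.05) = 432 free slots, i.e. 432 * 5 = 2160 minutes of free time
    # above the plan line; a package-port node always gets 0 free minutes.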
def adjust_free_minutes(self, minutes):
"""
调整节点的免费超规划线时间。
:param minutes: 调整的分钟数
"""
self.free_minutes += minutes
logging.debug(f"节点 {self.node_name} 调整免费时长 {minutes} 分钟,剩余免费时长:{self.free_minutes} 分钟。")
def can_deal_brick(self, brick):
"""
判断节点是否有能力处理指定的砖块。
:param brick: 砖块信息,包含持续时间('duration')
:return: 布尔值,表示是否可以处理
"""
return self.free_minutes >= (brick['duration'] + HEAP_REDUNDANT_TIME)
def get_available_capacity(self):
"""
节点的可用能力,单位为 Gbps
"""
adjusted_capacity = self.capacity * self.scheduling_upper_limit_percentage
return adjusted_capacity - self.plan_line
def assign_bandwidth(self, bandwidth, timestamp):
"""
为节点分配带宽并记录时间
:param bandwidth: 分配的带宽(Gbps)
:param timestamp: 时间戳
"""
if bandwidth <= 0:
logging.debug(f"节点 {self.node_name} 在 {timestamp} 分配带宽为0,跳过。")
return
if timestamp in self.assigned_bandwidths:
self.assigned_bandwidths[timestamp] += bandwidth
else:
self.assigned_bandwidths[timestamp] = bandwidth
logging.debug(
f"节点 {self.node_name} 在 {timestamp} 分配了 {bandwidth:.2f} Gbps。"
)
def increase_plan_line(self, increment):
"""
增加节点的规划线
:param increment: 增加的规划线值(Gbps)
:return: 是否成功增加
"""
adjusted_capacity = self.capacity * self.scheduling_upper_limit_percentage
new_plan_line = self.plan_line + increment
if new_plan_line > adjusted_capacity:
logging.warning(f"节点 {self.node_name} 的规划线已达到最大容量,无法再增加。")
return False
old_plan_line = self.plan_line
self.plan_line = new_plan_line
logging.info(f"节点 {self.node_name} 的规划线从 {old_plan_line} Gbps 增加至 {self.plan_line} Gbps。")
return True
def log_current_status(self):
"""
记录当前节点的关键信息,包括规划线和剩余免费时长。
"""
logging.info(
f"节点信息:{self.node_name} | 规划线:{self.plan_line} Gbps | "
f"剩余免费时长:{self.free_minutes} 分钟 | 价格系数:{self.bill_rate} | 计费方式:{self.billing_type.value}"
)
def calculate_cost(self):
"""
计算节点的付费成本。
"""
if self.billing_type == BillingType.MONTHLY_95:
assigned_values = list(self.assigned_bandwidths.values())
if assigned_values:
node_95 = np.percentile(assigned_values, 95)
return node_95 * self.bill_rate
else:
return 0.0
elif self.billing_type == BillingType.PACKAGE_PORT:
return self.capacity * self.bill_rate
elif self.billing_type == BillingType.AVERAGE_TRAFFIC:
assigned_values = list(self.assigned_bandwidths.values())
if assigned_values:
avg_bandwidth = np.mean(assigned_values)
return avg_bandwidth * self.bill_rate
else:
return 0.0
else:
return 0.0
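# A minimal sketch of how NodeCapacity.calculate_cost behaves for monthly-95
# billing (not part of the production flow; the node name, capacity and the
# assigned-bandwidth series below are hypothetical).
def _example_monthly_95_cost():
    node = NodeCapacity(
        node_id='demo_node', node_name='demo_node', operator='demo_op',
        capacity=400.0, plan_line=120.0, bill_rate=1.0,
        billing_type=BillingType.MONTHLY_95
    )
    # 100 five-minute samples: the 95th percentile of the assigned series
    # determines the billable bandwidth for monthly-95 nodes.
    base = datetime.datetime(2024, 10, 1)
    for i in range(100):
        node.assign_bandwidth(100.0 + i, base + datetime.timedelta(minutes=5 * i))
    # Roughly np.percentile(range(100, 200), 95) * 1.0, i.e. about 194 Gbps.
    return node.calculate_cost()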
class BandwidthDemand:
"""
表示一个带宽需求实例。
"""
def __init__(self, operator, province, timestamp, demand_value):
self.operator = operator
self.province = province
self.timestamp = timestamp
self.demand_value = demand_value
class FileReader:
"""文件读取器"""
@staticmethod
def read_demand_data(file_path):
"""
读取带宽需求数据
:param file_path: 带宽需求数据文件的路径
:return: 按运营商和省份分组的带宽需求字典
"""
try:
demand_df = pd.read_csv(file_path)
demand_df['ts'] = pd.to_datetime(demand_df['ts'])
bandwidth_demands = {}
for _, row in demand_df.iterrows():
operator = row['isp_cn']
province = row['node_province']
timestamp = row['ts']
demand_value = float(row['switch_gbps'])
key = (operator, province)
if key not in bandwidth_demands:
bandwidth_demands[key] = []
bandwidth_demands[key].append(BandwidthDemand(operator, province, timestamp, demand_value))
logging.info(f"成功读取带宽需求数据,共 {sum(len(v) for v in bandwidth_demands.values())} 条记录。")
return bandwidth_demands
except Exception as e:
logging.error(f"读取带宽需求数据失败:{e}")
return {}
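# A minimal sketch of the demand CSV layout read_demand_data expects (column
# names are taken from the code above; the sample rows are hypothetical):
#
#   ts,isp_cn,node_province,switch_gbps
#   2024-10-01 00:00:00,<operator>,<province>,350.5
#   2024-10-01 00:05:00,<operator>,<province>,362.1
#
# FileReader.read_demand_data('../data/work_data_mult.csv') then returns a dict
# such as {(<operator>, <province>): [BandwidthDemand, ...], ...}.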
def prepare_bricks(total_plan_line, bandwidth_demands, time_slot=45):
"""
根据总规划线和带宽需求,生成砖块
:param total_plan_line: 总规划线(Gbps)
:param bandwidth_demands: 带宽需求列表
:param time_slot: 砖块的时间槽大小(分钟)
:return: bricks 字典
"""
if not bandwidth_demands:
return {}
demand_df = pd.DataFrame([(d.timestamp, d.demand_value) for d in bandwidth_demands], columns=['ts', 'demand'])
demand_df.sort_values('ts', inplace=True)
bricks = {}
brick_id = 0
current_brick = None
for _, row in demand_df.iterrows():
timestamp = row['ts']
demand = row['demand']
excess = demand - total_plan_line
if excess <= 0:
continue
if current_brick is None:
current_brick = {
'start_time': timestamp,
'end_time': timestamp,
'duration': 5,
'top_height': excess,
'id': brick_id
}
brick_id += 1
else:
duration = (timestamp - current_brick['start_time']).total_seconds() / 60 + 5
if duration <= time_slot:
current_brick['end_time'] = timestamp
current_brick['duration'] = duration
current_brick['top_height'] = max(current_brick['top_height'], excess)
else:
bricks[current_brick['id']] = current_brick
logging.debug(f"生成砖块 {current_brick['id']}: {current_brick}")
current_brick = {
'start_time': timestamp,
'end_time': timestamp,
'duration': 5,
'top_height': excess,
'id': brick_id
}
brick_id += 1
if current_brick is not None:
bricks[current_brick['id']] = current_brick
logging.debug(f"生成砖块 {current_brick['id']}: {current_brick}")
logging.info(f"生成了 {len(bricks)} 个砖块。")
return bricks
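# A minimal sketch of prepare_bricks on a tiny demand series (all numbers are
# hypothetical): with a 100 Gbps total plan line, only samples above the line
# contribute, and over-line samples that fall within the same 45-minute window
# are merged into a single brick whose top_height is the largest excess.
def _example_prepare_bricks():
    base = datetime.datetime(2024, 10, 1, 20, 0)
    values = [90.0, 110.0, 130.0, 95.0, 120.0]
    demands = [
        BandwidthDemand('demo_op', 'demo_prov', base + datetime.timedelta(minutes=5 * i), v)
        for i, v in enumerate(values)
    ]
    bricks = prepare_bricks(total_plan_line=100.0, bandwidth_demands=demands)
    # With the code above this yields one brick spanning 20:05-20:20 with a
    # duration of 20 minutes and top_height 30 Gbps (the 95 Gbps dip does not
    # close the brick because the 45-minute window is not exceeded).
    return bricks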
def calculate_sols(sol, only_one=True):
"""
递归计算堆高组合
:param sol: 当前解决方案的字典结构
:param only_one: 是否只寻找一个组合
:return: 布尔值,表示是否找到组合
"""
result = False
capacities = sol['can_use_capacities']
nodeid2capacity = {capacity.node_id: capacity for capacity in capacities}
if len(sol['bricks']) <= 0:
return True
else:
bricks_sorted = sorted([brick for brick in sol['bricks'].values()], key=lambda x: -x['top_height'])
current_brick = bricks_sorted[0]
eligible_nodes = [node for node in capacities if node.can_deal_brick(current_brick)]
node_combinations = get_nodes_combination(current_brick, eligible_nodes, only_one=True)
logging.debug("为砖块 %s 计算出的节点组合:\n%s", current_brick['id'],
json.dumps(node_combinations, indent=4, ensure_ascii=False))
if not node_combinations:
sol['children'] = False
return False
for node_combination in node_combinations:
for node_id in node_combination:
capacity = nodeid2capacity.get(node_id)
if capacity:
capacity.adjust_free_minutes(-current_brick['duration'] - HEAP_REDUNDANT_TIME)
updated_bricks = copy.deepcopy(sol['bricks'])
del updated_bricks[current_brick['id']]
child_sol = {
'parent': sol,
'children': [],
'can_use_capacities': capacities,
'nodes': node_combination,
'brick': current_brick,
'bricks': updated_bricks
}
sol['children'].append(child_sol)
s_result = calculate_sols(child_sol, only_one)
result = result or s_result
for node_id in node_combination:
capacity = nodeid2capacity.get(node_id)
if capacity:
capacity.adjust_free_minutes(current_brick['duration'] + HEAP_REDUNDANT_TIME)
if s_result and only_one:
break
return result
def has_sols(bricks, capacities):
"""
对砖块和节点,计算最合适的堆高组合,返回(组合是否满足所有砖块的堆高,具体组合)
:param bricks: 砖块字典
:param capacities: 节点容量列表
:return: (是否存在堆高组合, 组合方案字典)
"""
solution = {
'parent': None,
'children': [],
'nodes': None,
'can_use_capacities': capacities,
'brick': None,
'bricks': bricks
}
result = calculate_sols(solution)
return result, solution
def get_ignore_node_ids(current_node: dict) -> list:
"""
获取需要忽略的节点ID列表,避免在兄弟节点中重复使用。
:param current_node: 当前节点的字典结构
:return: 忽略的节点ID列表
"""
ignore_node_ids = []
parent = current_node.get('parent')
while parent is not None:
for child in parent.get('children', []):
ignore_node_ids.append(child['node_id'])
parent = parent.get('parent')
return ignore_node_ids
def calculate_nodes_combination(current_node: dict, ignore_node_ids: list = None, only_one: bool = True) -> bool:
"""
递归计算某个堆高高度下是否存在节点组合能满足需求。
:param current_node: 当前状态字典
:param ignore_node_ids: 忽略的节点ID列表
:param only_one: 是否只寻找一个组合
:return: 布尔值,表示是否找到组合
"""
if ignore_node_ids is None:
ignore_node_ids = get_ignore_node_ids(current_node)
logging.debug(f"当前组合计算: val={current_node['val']}, 忽略节点={ignore_node_ids}")
success = False
if current_node['val'] <= 0:
logging.debug("目标高度已满足。")
return True
total_capacity = sum(node.get_available_capacity() for node in current_node['capacities'])
if current_node['val'] > total_capacity:
logging.debug("节点总能力不足以满足需求。")
return False
sorted_nodes = sorted(
current_node['capacities'],
key=lambda n: n.get_available_capacity(),
reverse=True
)
for node in sorted_nodes:
if node.node_id in ignore_node_ids:
continue
ignore_node_ids.append(node.node_id)
remaining_val = current_node['val'] - node.get_available_capacity()
child_val = max(remaining_val, 0)
child_node = {
'children': [],
'parent': current_node,
'node_id': node.node_id,
'val': child_val,
'capacities': current_node['capacities']
}
logging.debug(
f"尝试节点 {node.node_id} ({node.node_name}), 当前val={current_node['val']} -> "
f"child_val={child_val}, 节点可用容量={node.get_available_capacity()} Gbps, "
f"累加后剩余需求={child_val}"
)
current_node['children'].append(child_node)
result = calculate_nodes_combination(child_node, copy.deepcopy(ignore_node_ids), only_one)
success = success or result
if not result:
ignore_node_ids.pop()
if result and only_one:
break
if not success:
logging.debug("组合失败,未找到合适的节点组合。")
return success
def get_nodes_combination(brick: dict, eligible_nodes: list, only_one: bool = True) -> list:
"""
获取满足砖块需求的节点组合。
:param brick: 当前砖块信息
:param eligible_nodes: 可用节点列表
:param only_one: 是否只寻找一个组合
:return: 节点组合列表
"""
logging.debug(f"开始计算砖块 {brick['id']} 的节点组合,目标高度={brick['top_height']}")
h = {
'children': [],
'parent': None,
'node_id': None,
'val': brick['top_height'],
'capacities': eligible_nodes
}
if not eligible_nodes:
logging.debug("警告:传入的节点容量列表为空,无法计算组合。")
return []
logging.debug(f"初始可用节点容量数:{len(eligible_nodes)}")
for node in eligible_nodes:
logging.debug(f"节点 {node.node_id} ({node.node_name}) - 可用容量:{node.get_available_capacity()} Gbps")
calculate_nodes_combination(h, [], only_one)
combination_result = get_combination(h)
logging.debug(f"组合结果为: {combination_result}")
return combination_result
def get_combination(h):
"""
遍历组合树,返回所有有效的节点组合
:param h: 当前节点的字典结构
:return: 节点组合列表
"""
combination = []
if h['val'] > 0 and len(h['children']) == 0:
return []
if len(h['children']) > 0:
for child in h['children']:
child_combinations = get_combination(child)
if len(child_combinations) == 0:
continue
for child_combination in child_combinations:
if h['node_id'] is not None:
child_combination.append(h['node_id'])
combination.append(child_combination)
else:
combination.append(child_combination)
else:
if h['node_id'] is not None:
combination = [[h['node_id']]]
return combination
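# A minimal sketch of the combination search (hypothetical nodes and brick):
# two nodes with 80 Gbps of headroom each are asked to cover a 120 Gbps brick,
# so the search has to stack both of them.
def _example_nodes_combination():
    node_a = NodeCapacity('demo_a', 'demo_a', 'demo_op', capacity=200.0, plan_line=120.0,
                          billing_type=BillingType.MONTHLY_95)
    node_b = NodeCapacity('demo_b', 'demo_b', 'demo_op', capacity=180.0, plan_line=100.0,
                          billing_type=BillingType.MONTHLY_95)
    brick = {'id': 0, 'duration': 30, 'top_height': 120.0}
    # With only_one=True the search stops at the first feasible stack; here it
    # returns a single combination containing both 'demo_a' and 'demo_b'.
    return get_nodes_combination(brick, [node_a, node_b], only_one=True)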
def extract_allocation_plan(sol):
"""
从堆高组合中提取每个砖块的分配方案
:param sol: 堆高组合方案
:return: allocation_plan 列表,包含每个砖块的分配信息
"""
allocation_plan = []
def traverse(sol):
if sol['brick'] is not None:
brick = sol['brick']
nodes = sol['nodes']
allocation_plan.append({
'start_time': brick['start_time'],
'end_time': brick['end_time'],
'duration': brick['duration'],
'top_height': brick['top_height'],
'nodes': nodes
})
logging.info(
f"砖块 {brick['id']} 分配: 时间范围 {brick['start_time']} - {brick['end_time']} "
f"持续时间 {brick['duration']} 分钟,超出规划线 {brick['top_height']:.2f} Gbps,"
f"分配节点: {', '.join(nodes)}"
)
for node_id in nodes:
node = next((n for n in sol['can_use_capacities'] if n.node_id == node_id), None)
if node:
allocated = node.assigned_bandwidths.get(brick['start_time'], 0)
logging.debug(f"节点 {node.node_name} 在 {brick['start_time']} 分配了 {allocated} Gbps。")
for child in sol.get('children', []):
traverse(child)
traverse(sol)
return allocation_plan
def allocate_initial_bandwidth(timestamp, demand_value, nodes):
"""
分配初始带宽,包括包端口节点和价格为0的95计费节点。
返回已分配的带宽总和。
"""
logging.debug(f"开始初始分配时间点 {timestamp} 的需求 {demand_value} Gbps。")
package_nodes = [node for node in nodes if node.billing_type == BillingType.PACKAGE_PORT]
free_95_nodes = [node for node in nodes if node.is_free_95_node]
initial_nodes = package_nodes + free_95_nodes
total_allocated = 0
remaining_demand = demand_value
if initial_nodes and remaining_demand > 0:
for node in initial_nodes:
allocation = min(node.capacity * node.scheduling_upper_limit_percentage * 0.88, remaining_demand)
node.assign_bandwidth(allocation, timestamp)
total_allocated += allocation
remaining_demand -= allocation
if remaining_demand <= 0:
break
return total_allocated
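# Worked example of the per-node cap above (hypothetical node): a package-port
# node with 400 Gbps capacity and a 100% scheduling upper limit takes at most
# 400 * 1.0 * 0.88 = 352 Gbps of a timestamp's demand before the next node is used.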
def allocate_average_traffic_bandwidth(timestamp, demand_value, nodes):
"""
分配平均流量带宽,仅在18:00到22:00之间分配到平均流量节点。
返回已分配的带宽总和。
"""
logging.debug(f"开始分配平均流量节点时间点 {timestamp} 的需求 {demand_value} Gbps。")
if not (18 <= timestamp.hour < 22):
logging.debug(f"时间点 {timestamp} 不在 18:00 到 22:00 之间,跳过平均流量节点分配。")
return 0
average_nodes = [node for node in nodes if node.billing_type == BillingType.AVERAGE_TRAFFIC]
total_allocated = 0
remaining_demand = demand_value
if average_nodes and remaining_demand > 0:
for node in average_nodes:
allocation = min(node.capacity * node.scheduling_upper_limit_percentage * 0.88, remaining_demand)
node.assign_bandwidth(allocation, timestamp)
total_allocated += allocation
remaining_demand -= allocation
if remaining_demand <= 0:
break
return total_allocated
def allocate_remaining_bandwidth(timestamp, demand_value, nodes, total_plan_line, allocation_plan):
"""
分配剩余的带宽需求,主要针对非免费月95计费节点。
"""
logging.debug(f"开始分配剩余需求时间点 {timestamp} 的需求 {demand_value} Gbps。")
billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95 and not node.is_free_95_node]
remaining_demand = demand_value
total_plan_line = sum(node.plan_line for node in billing_nodes)
if remaining_demand <= total_plan_line:
total_allocated = 0
allocations = []
num_nodes = len(billing_nodes)
for i, node in enumerate(billing_nodes):
if i < num_nodes - 1:
allocation = remaining_demand * (node.plan_line / total_plan_line)
allocation = round(allocation, 10)
node.assign_bandwidth(allocation, timestamp)
total_allocated += allocation
allocations.append(allocation)
logging.debug(f"节点 {node.node_name} 在 {timestamp} 分配了 {allocation:.10f} Gbps(平均分配)。")
else:
allocation = remaining_demand - total_allocated
node.assign_bandwidth(allocation, timestamp)
allocations.append(allocation)
logging.debug(f"节点 {node.node_name} 在 {timestamp} 分配了 {allocation:.10f} Gbps(平均分配,调整后)。")
else:
total_allocated = 0
for node in billing_nodes:
plan_line = node.plan_line
if remaining_demand > 0:
allocation = min(remaining_demand, plan_line)
node.assign_bandwidth(allocation, timestamp)
total_allocated += allocation
remaining_demand -= allocation
logging.debug(f"月95计费节点在 {timestamp} 分配了 {total_allocated:.2f} Gbps(规划线部分)。")
if remaining_demand > 0 and allocation_plan:
allocated = False
for plan in allocation_plan:
if plan['start_time'] <= timestamp <= plan['end_time']:
nodes_in_plan = [node for node in nodes if node.node_id in plan['nodes']]
total_available_capacity = sum(node.capacity * node.scheduling_upper_limit_percentage - node.assigned_bandwidths.get(timestamp, 0) for node in nodes_in_plan)
if total_available_capacity <= 0:
continue
for node in nodes_in_plan:
node_available_capacity = node.capacity * node.scheduling_upper_limit_percentage - node.assigned_bandwidths.get(timestamp, 0)
node_excess_allocation = node_available_capacity / total_available_capacity * remaining_demand
node_excess_allocation = min(node_excess_allocation, node_available_capacity)
node.assign_bandwidth(node_excess_allocation, timestamp)
allocated = True
logging.info(f"时间点 {timestamp} 的剩余需求 {remaining_demand:.2f} Gbps 已分配到堆高方案节点。")
remaining_demand = 0
break
if not allocated:
logging.warning(f"时间点 {timestamp} 的需求 {remaining_demand:.2f} Gbps 超出可分配范围,未分配。")
if remaining_demand > 0:
logging.warning(f"时间点 {timestamp} 的需求 {remaining_demand:.2f} Gbps 无法分配,超出所有节点容量。")
def save_to_csv(nodes, timestamps, operator, province, total_demand_dict, filename='bandwidth_allocation_summary.csv'):
"""
将带宽分配详情按时间节点写入CSV文件
:param nodes: 所有节点对象列表
:param timestamps: 所有时间点列表
:param operator: 运营商名称
:param province: 省份名称
:param total_demand_dict: 每个时间点的总带宽需求
:param filename: CSV文件名
"""
try:
with open(filename, 'w', newline='') as csvfile:
fieldnames = ['Operator', 'Province', 'Timestamp', 'Total_Demand'] + [node.node_name for node in nodes]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for timestamp in timestamps:
row = {
'Operator': operator,
'Province': province,
'Timestamp': timestamp.strftime('%Y-%m-%d %H:%M:%S'),
'Total_Demand': f"{total_demand_dict.get(timestamp, 0):.2f}"
}
for node in nodes:
bandwidth = node.assigned_bandwidths.get(timestamp, 0)
row[node.node_name] = f"{bandwidth:.2f}"
writer.writerow(row)
logging.info(f"带宽分配明细已保存到 {filename}")
except Exception as e:
logging.error(f"保存带宽分配结果到CSV失败:{e}")
def automatic_plan_line_adjustment(nodes, bandwidth_demands, step_size=STEP_SIZE):
"""
自动调整月95计费节点的规划线,直到找到可行的堆高方案或无法继续调整。
"""
    if not bandwidth_demands:
        # Nothing left to cover: without this guard, max() over an empty demand list would raise.
        return []
    adjustment_attempts = 0
    billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95 and not node.is_free_95_node]
if not billing_nodes:
logging.warning("没有可调整的月95计费节点,无法进行提线调整。")
return []
nodes_sorted = sorted(billing_nodes, key=lambda x: x.bill_rate)
num_nodes = len(nodes_sorted)
current_node_index = 0
while adjustment_attempts < MAX_ADJUSTMENTS:
total_plan_line = sum(node.plan_line for node in billing_nodes)
total_capacity = sum(node.capacity * node.scheduling_upper_limit_percentage for node in billing_nodes)
max_demand = max(demand.demand_value for demand in bandwidth_demands)
logging.info(f"尝试总规划线 {total_plan_line} Gbps,节点总容量 {total_capacity} Gbps,最大需求 {max_demand} Gbps。")
if total_capacity < max_demand:
logging.error("节点总容量不足以满足最大带宽需求。")
break
if max_demand > total_plan_line:
bricks = prepare_bricks(total_plan_line, bandwidth_demands)
if not bricks:
logging.warning("没有生成任何砖块,所有带宽需求均未超过总规划线。")
return []
success, sol = has_sols(bricks, billing_nodes)
if success:
logging.info("找到可行的堆高组合。")
allocation_plan = extract_allocation_plan(sol)
return allocation_plan
else:
logging.info("未找到可行的堆高组合。")
allocation_plan = []
else:
logging.info("需求未超出总规划线,无需堆高。")
return []
adjustment_made = False
attempts = 0
while attempts < num_nodes:
node = nodes_sorted[current_node_index]
adjusted_capacity = node.capacity * node.scheduling_upper_limit_percentage
if node.plan_line < adjusted_capacity:
logging.info(f"尝试为节点 {node.node_name} 提线 {step_size} Gbps。")
success_adjust = node.increase_plan_line(step_size)
if success_adjust:
adjustment_attempts += 1
adjustment_made = True
logging.info(f"已为节点 {node.node_name} 提线至 {node.plan_line} Gbps。")
current_node_index = (current_node_index + 1) % num_nodes
break
else:
logging.info(f"节点 {node.node_name} 已无法提线,跳过。")
else:
logging.info(f"节点 {node.node_name} 已达到最大容量,跳过。")
current_node_index = (current_node_index + 1) % num_nodes
attempts += 1
if not adjustment_made:
logging.warning("所有节点的规划线已达到最大容量,无法再提线。")
break
logging.error("未能找到可行的带宽分配方案,即使在调整规划线后。")
return None
def calculate_reuse_rate(original_bandwidth_demands, assigned_bandwidths, nodes):
"""
计算复用率。
复用率计算 = 1 - (对应地区节点95值累加 / 总带宽95值)
:param original_bandwidth_demands: 原始带宽需求列表
:param assigned_bandwidths: 节点分配的带宽字典(时间戳: 总分配带宽)
:param nodes: 节点列表(包括所有95计费节点)
:return: (复用率, 总节点95值, 总需求95值)
"""
if not original_bandwidth_demands or not nodes:
return None
demand_values = [demand.demand_value for demand in original_bandwidth_demands]
demand_95 = np.percentile(demand_values, 95)
billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95]
node_95_values = []
for node in billing_nodes:
assigned_values = list(node.assigned_bandwidths.values())
if assigned_values:
node_95 = np.percentile(assigned_values, 95)
node_95_values.append(node_95)
total_node_95 = sum(node_95_values)
if demand_95 == 0:
return None
reuse_rate = 1 - (total_node_95 / demand_95)
return reuse_rate, total_node_95, demand_95
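# Worked example of the reuse-rate formula (hypothetical numbers): if the 95th
# percentile of total demand is 1000 Gbps and the per-node 95th percentiles of
# the monthly-95 nodes sum to 850 Gbps, then reuse_rate = 1 - 850 / 1000 = 0.15,
# i.e. the combined billable 95th percentile is 15% below the demand's own
# 95th percentile.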
def process_operator_province(operator, province, demands, province_limits,
output_dir='output', reduction_percentage=0.0,
adjust_limits=True, enable_automatic_plan_line_adjustment=True):
"""
处理单个运营商和省份的带宽分配和优化。
:param operator: 运营商名称
:param province: 省份名称
:param demands: 带宽需求列表
:param province_limits: 省份限制集合
:param output_dir: 输出目录
:param reduction_percentage: 当前的免费时长减少比例
:param adjust_limits: 是否进行免费时长调减和调度上限调整,默认为True
:param enable_automatic_plan_line_adjustment: 是否启用自动提线,默认为True
:return: (operator, province, sum_upper_limit_bandwidth, sum_guaranteed_bandwidth, total_node_95, demand_95, reuse_rate, total_cost)
"""
logging.info(f"开始处理运营商 {operator},省份 {province}。")
original_demands = demands.copy()
if (operator, province) in province_limits:
logging.info(f"根据省份限制,运营商 {operator},省份 {province} 只使用本省节点。")
demands = [demand for demand in demands if demand.province == province]
logging.info(f"处理 {operator} {province} 的本省带宽需求,共 {len(demands)} 条记录。")
else:
demands = aggregate_demands_by_operator(operator, demands)
original_demands = demands.copy()
logging.info(f"处理运营商 {operator} 的聚合带宽需求,共 {len(demands)} 条记录。")
province = '无限制'
nodes = initialize_nodes(operator, province, demands)
logging.info(f"初始化节点完成,共 {len(nodes)} 个节点。")
if not nodes:
logging.warning(f"运营商 {operator},省份 {province} 没有可用节点,跳过。")
return operator, province, 0, 0, 0, 0, None, 0.0
timestamps = sorted({demand.timestamp for demand in demands})
remaining_demands = {}
total_demand_dict = {}
for demand in demands:
timestamp = demand.timestamp
demand_value = demand.demand_value
total_demand_dict[timestamp] = demand_value
allocated_bandwidth = allocate_initial_bandwidth(timestamp, demand_value, nodes)
remaining_demand_value = demand_value - allocated_bandwidth
remaining_demands[timestamp] = remaining_demand_value
for demand in demands:
timestamp = demand.timestamp
remaining_demand_value = remaining_demands[timestamp]
allocated_bandwidth = allocate_average_traffic_bandwidth(timestamp, remaining_demand_value, nodes)
remaining_demands[timestamp] -= allocated_bandwidth
billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95 and not node.is_free_95_node]
if not billing_nodes:
logging.warning(f"运营商 {operator},省份 {province} 没有可调整的月95计费节点,跳过。")
return operator, province, 0, 0, 0, 0, None, 0.0
total_plan_line = sum(node.plan_line for node in billing_nodes)
logging.info(f"运营商 {operator},省份 {province} 的可调整月95计费节点总规划线为 {total_plan_line} Gbps。")
remaining_bandwidth_demands = [
BandwidthDemand(operator, province, timestamp, remaining_demands[timestamp])
for timestamp in timestamps if remaining_demands[timestamp] > 0
]
if enable_automatic_plan_line_adjustment:
allocation_plan = automatic_plan_line_adjustment(
billing_nodes,
remaining_bandwidth_demands,
step_size=STEP_SIZE
)
if allocation_plan is None:
logging.error(f"运营商 {operator},省份 {province} 的带宽分配失败,未找到可行的方案。")
return operator, province, 0, 0, 0, 0, None, 0.0
else:
total_plan_line = sum(node.plan_line for node in billing_nodes)
logging.info(f"使用当前的总规划线 {total_plan_line} Gbps 进行堆高方案计算。")
bricks = prepare_bricks(total_plan_line, remaining_bandwidth_demands)
if bricks:
success, sol = has_sols(bricks, billing_nodes)
if success:
logging.info("找到可行的堆高组合。")
allocation_plan = extract_allocation_plan(sol)
else:
logging.error("未能找到可行的堆高方案,带宽分配失败。")
return operator, province, 0, 0, 0, 0, None, 0.0
else:
logging.info("需求未超出总规划线,无需堆高。")
allocation_plan = []
logging.info(f"运营商 {operator},省份 {province} 的带宽分配与{'自动提线' if enable_automatic_plan_line_adjustment else '堆高方案计算'}完成。")
for demand in remaining_bandwidth_demands:
allocate_remaining_bandwidth(
demand.timestamp,
demand.demand_value,
nodes,
total_plan_line,
allocation_plan
)
reduction_str = f"{int(reduction_percentage * 100)}%"
if province == '':
province = '全部省份'
sub_dir = os.path.join(output_dir, f"减免{reduction_str}_")
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
output_filename = os.path.join(sub_dir, f"{operator}_{province}.csv")
save_to_csv(nodes, timestamps, operator, province, total_demand_dict, filename=output_filename)
total_demand_values = [total_demand_dict[timestamp] for timestamp in timestamps]
assigned_bandwidths = {}
for timestamp in timestamps:
assigned_bandwidths[timestamp] = sum(node.assigned_bandwidths.get(timestamp, 0) for node in nodes if node.billing_type == BillingType.MONTHLY_95)
reuse_rate_result = calculate_reuse_rate(
original_demands,
assigned_bandwidths,
nodes
)
if reuse_rate_result is not None:
reuse_rate, total_node_95, demand_95 = reuse_rate_result
logging.info(f"运营商 {operator},省份 {province} 的复用率为 {reuse_rate:.4f}。")
else:
reuse_rate = None
total_node_95 = 0
demand_95 = 0
logging.info(f"运营商 {operator},省份 {province} 的复用率无法计算。")
sum_upper_limit_bandwidth = sum(node.capacity * node.scheduling_upper_limit_percentage for node in nodes if node.billing_type == BillingType.MONTHLY_95)
sum_guaranteed_bandwidth = sum(node.plan_line for node in nodes if node.billing_type == BillingType.MONTHLY_95)
total_cost = 0.0
for node in nodes:
node_cost = node.calculate_cost()
total_cost += node_cost
logging.info(f"运营商 {operator},省份 {province} 的总付费成本为 {total_cost:.2f}。")
return (operator, province, sum_upper_limit_bandwidth, sum_guaranteed_bandwidth, total_node_95, demand_95, reuse_rate, total_cost)
def aggregate_demands_by_operator(operator, demands):
"""
将同一运营商的所有省份带宽需求在每个时间点聚合
:param operator: 运营商名称
:param demands: 带宽需求列表
:return: 聚合后的带宽需求列表
"""
demand_df = pd.DataFrame([(d.timestamp, d.demand_value) for d in demands], columns=['timestamp', 'demand_value'])
aggregated_df = demand_df.groupby('timestamp').sum().reset_index()
aggregated_demands = [BandwidthDemand(operator, '', row['timestamp'], row['demand_value']) for _, row in aggregated_df.iterrows()]
return aggregated_demands
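# A minimal sketch of aggregate_demands_by_operator (hypothetical values):
# demands from two provinces at the same timestamp are summed into one record.
def _example_aggregate_demands():
    ts = datetime.datetime(2024, 10, 1, 20, 0)
    demands = [
        BandwidthDemand('demo_op', 'prov_a', ts, 100.0),
        BandwidthDemand('demo_op', 'prov_b', ts, 50.0),
    ]
    aggregated = aggregate_demands_by_operator('demo_op', demands)
    # aggregated holds a single BandwidthDemand with demand_value 150.0 and an
    # empty province (the per-operator view is province-agnostic).
    return aggregated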
def process_all(operator_province_pairs, bandwidth_demands_dict, province_limits,
output_dir='output', reduction_percentage=0.0, adjust_limits=True, enable_automatic_plan_line_adjustment=True):
"""
并发处理所有运营商和省份的带宽分配任务。
:param operator_province_pairs: 运营商和省份的元组列表
:param bandwidth_demands_dict: 按运营商和省份分组的带宽需求字典
:param province_limits: 省份限制集合
:param output_dir: 输出目录
:param reduction_percentage: 当前的免费时长减少比例
:param adjust_limits: 是否进行免费时长调减和调度上限调整
:param enable_automatic_plan_line_adjustment: 是否启用自动提线
:return: 结果列表
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
results = []
with ProcessPoolExecutor() as executor:
futures = []
for (operator, province) in operator_province_pairs:
if (operator, province) in province_limits:
demands = bandwidth_demands_dict.get((operator, province), [])
if not demands:
logging.warning(f"运营商 {operator},省份 {province} 没有带宽需求,跳过。")
continue
futures.append(
executor.submit(
process_operator_province,
operator,
province,
demands,
province_limits,
output_dir,
reduction_percentage,
adjust_limits,
enable_automatic_plan_line_adjustment
)
)
operators = set([op for op, _ in operator_province_pairs])
for operator in operators:
unlimited_provinces = [province for op, province in operator_province_pairs if
op == operator and (op, province) not in province_limits]
if not unlimited_provinces:
continue
demands = []
for province in unlimited_provinces:
demands.extend(bandwidth_demands_dict.get((operator, province), []))
if not demands:
logging.warning(f"运营商 {operator} 没有无限制省份的带宽需求,跳过。")
continue
futures.append(
executor.submit(
process_operator_province,
operator,
'无限制',
demands,
province_limits,
output_dir,
reduction_percentage,
adjust_limits,
enable_automatic_plan_line_adjustment
)
)
for future in as_completed(futures):
result = future.result()
results.append(result)
return results
def initialize_nodes(operator, province, demands):
"""
根据业务需求自动初始化节点列表,调整节点数量和容量。
:param operator: 运营商名称
:param province: 省份名称
:param demands: 带宽需求列表
:return: 初始化的节点列表
"""
max_demand = max(demand.demand_value for demand in demands)
total_capacity_needed = max_demand * 1.1
max_node_capacity = MAX_NODE_CAPACITY
num_nodes = int(np.ceil(total_capacity_needed / max_node_capacity))
node_capacity = total_capacity_needed / num_nodes
if node_capacity > max_node_capacity:
node_capacity = max_node_capacity
nodes = []
for i in range(num_nodes):
node_id = f"{operator}_{province}_node_{i+1}"
node_name = f"{operator}_{province}_node_{i+1}"
capacity = node_capacity
plan_line = capacity * 0.3
node = NodeCapacity(
node_id=node_id,
node_name=node_name,
operator=operator,
capacity=capacity,
plan_line=plan_line,
bill_rate=1.0,
billing_type=BillingType.MONTHLY_95,
node_province=province
)
nodes.append(node)
logging.info(f"根据需求初始化节点,共 {num_nodes} 个,每个节点容量 {node_capacity:.2f} Gbps。")
return nodes
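# Worked example of the sizing rule above (hypothetical demand): with a peak
# demand of 950 Gbps, total_capacity_needed = 950 * 1.1 = 1045 Gbps, so
# num_nodes = ceil(1045 / 400) = 3 and each node gets 1045 / 3 = 348.33 Gbps of
# capacity with a plan line of 0.3 * 348.33 = 104.5 Gbps.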
def main():
"""
主程序入口。
"""
logging.info("带宽分配系统启动。")
parser = argparse.ArgumentParser(description='带宽分配系统')
parser.add_argument('--operator', type=str, help='指定运营商名称')
parser.add_argument('--province', type=str, help='指定省份名称')
parser.add_argument('--adjust-method', type=str, choices=['reduce_free_minutes', 'reduce_scheduling_upper_limit', 'none'],
default='reduce_free_minutes', help='选择调整方式:reduce_free_minutes、reduce_scheduling_upper_limit 或 none')
parser.add_argument('--disable-auto-plan-line', action='store_true', help='禁用自动提线功能')
args = parser.parse_args()
specified_operator = args.operator
specified_province = args.province
adjust_method = args.adjust_method
disable_auto_plan_line = args.disable_auto_plan_line
if (specified_operator and not specified_province) or (not specified_operator and specified_province):
parser.error("必须同时指定运营商和省份,或者都不指定。")
bd_data_path = '../data/work_data_mult.csv'
bandwidth_demands_dict = FileReader.read_demand_data(bd_data_path)
if not bandwidth_demands_dict:
logging.error("没有带宽需求数据,程序终止。")
return
province_limits_file = '../data/province_limit.csv'
province_limits = set()
operator_province_pairs = list(bandwidth_demands_dict.keys())
logging.info(f"总共有 {len(operator_province_pairs)} 个运营商-省份组合需要处理。")
if specified_operator and specified_province:
operator_province_pairs = [(specified_operator, specified_province)]
logging.info(f"只处理指定的运营商和省份:{specified_operator} {specified_province}")
else:
logging.info("处理所有运营商和省份的组合。")
if adjust_method == 'reduce_scheduling_upper_limit':
reduction_percentages = [i * 0.05 for i in range(5)]
elif adjust_method == 'reduce_free_minutes':
reduction_percentages = [i * 0.1 for i in range(10)]
else:
reduction_percentages = [0.0]
total_free_minutes_list = []
reuse_rate_list = []
output_dir = 'output'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
csv_filename = os.path.join(output_dir, 'free_minutes_vs_reuse_rate.csv')
with open(csv_filename, 'w', newline='') as csvfile:
fieldnames = ['Reduction_Percentage', 'Total_Free_Minutes', 'Sum_Upper_Limit_Bandwidth',
'Sum_Guaranteed_Bandwidth', 'Total_Bandwidth_95th_Percentile',
'Total_Node_95th_Percentile', 'Optimal_Reuse_Rate', 'Total_Cost']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for reduction in reduction_percentages:
logging.info(f"开始处理{'免费时长' if adjust_method == 'reduce_free_minutes' else '调度上限' if adjust_method == 'reduce_scheduling_upper_limit' else '正常'}减少 {reduction*100:.0f}% 的情况。")
bandwidth_demands_dict_copy = copy.deepcopy(bandwidth_demands_dict)
adjust_limits = True
if adjust_method == 'reduce_free_minutes':
pass
elif adjust_method == 'reduce_scheduling_upper_limit':
pass
else:
adjust_limits = False
results = process_all(
operator_province_pairs,
bandwidth_demands_dict_copy,
province_limits,
output_dir=output_dir,
reduction_percentage=reduction,
adjust_limits=adjust_limits,
enable_automatic_plan_line_adjustment=not disable_auto_plan_line
)
total_free_minutes = 0
total_reuse_rate = 0
count = 0
total_upper_limit_bandwidth = 0
total_guaranteed_bandwidth = 0
total_node_95 = 0
total_demand_95 = 0
total_cost = 0.0
for result in results:
if result is None:
continue
(operator, province, sum_upper_limit_bandwidth, sum_guaranteed_bandwidth,
node_95_value, demand_95_value, reuse_rate, total_cost_value) = result
if reuse_rate is not None:
total_reuse_rate += reuse_rate
count += 1
total_upper_limit_bandwidth += sum_upper_limit_bandwidth
total_guaranteed_bandwidth += sum_guaranteed_bandwidth
total_node_95 += node_95_value
                    total_demand_95 += demand_95_value
total_cost += total_cost_value
else:
total_upper_limit_bandwidth += sum_upper_limit_bandwidth
total_guaranteed_bandwidth += sum_guaranteed_bandwidth
total_cost += total_cost_value
if count > 0:
average_reuse_rate = total_reuse_rate / count
else:
average_reuse_rate = None
total_free_minutes_list.append(total_free_minutes)
reuse_rate_list.append(average_reuse_rate)
writer.writerow({
'Reduction_Percentage': f"{reduction*100:.0f}%",
'Total_Free_Minutes': f"{total_free_minutes:.2f}",
'Sum_Upper_Limit_Bandwidth': f"{total_upper_limit_bandwidth:.2f}",
'Sum_Guaranteed_Bandwidth': f"{total_guaranteed_bandwidth:.2f}",
'Total_Bandwidth_95th_Percentile': f"{total_demand_95:.2f}",
'Total_Node_95th_Percentile': f"{total_node_95:.2f}",
'Optimal_Reuse_Rate': f"{average_reuse_rate:.4f}" if average_reuse_rate is not None else "N/A",
'Total_Cost': f"{total_cost:.2f}"
})
if average_reuse_rate is not None:
logging.info(f"{'免费时长' if adjust_method == 'reduce_free_minutes' else '调度上限' if adjust_method == 'reduce_scheduling_upper_limit' else '正常'}减少 {reduction*100:.0f}% 时,平均复用率为 {average_reuse_rate:.4f},总付费成本为 {total_cost:.2f}")
else:
logging.info(f"{'免费时长' if adjust_method == 'reduce_free_minutes' else '调度上限' if adjust_method == 'reduce_scheduling_upper_limit' else '正常'}减少 {reduction*100:.0f}% 时,平均复用率无法计算,总付费成本为 {total_cost:.2f}")
logging.info(f"免费时长与复用率的关系已保存到 {csv_filename}")
logging.info("带宽分配系统结束运行。")
if __name__ == "__main__":
main()