Simulating resource structure distribution

# -*- coding: utf-8 -*-
"""
带宽分配系统与自动节点调整功能

本模块实现了一个带宽分配系统,基于客户的带宽使用情况进行分配。
系统会自动调整节点的数量和容量(每个节点最大不超过400G),以满足业务需求并优化复用率。

日期:2024-10-31
"""

import datetime
import csv
import json
import logging
import time
from decimal import Decimal
from concurrent.futures import ProcessPoolExecutor, as_completed
from enum import Enum

import pandas as pd
import copy
import numpy as np
import os
import argparse  # 用于解析命令行参数

# 定义常量
HEAP_REDUNDANT_TIME = 0  # 堆高策略的额外时间缓冲(分钟)
STEP_SIZE = 10.0  # 每次调整规划线的步长(Gbps)
MAX_ADJUSTMENTS = 1000  # 最大调整次数,防止无限循环
MAX_NODE_CAPACITY = 400.0  # 单个节点的最大容量(Gbps)

# 日志配置
logging.basicConfig(
    level=logging.INFO,  # 设置最低日志级别为INFO
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),  # 将日志输出到控制台
        logging.FileHandler("bandwidth_allocation.log")  # 将日志保存到文件
    ]
)


def debug_time(func):
    """装饰器,用于记录函数执行时间"""
    def wrapper(*args, **kwargs):
        start = time.time()
        logging.debug('=======before %s %s======', func.__name__, start)
        result = func(*args, **kwargs)
        logging.debug('=======after %s %s======', func.__name__, time.time() - start)
        return result
    return wrapper


class BillingType(Enum):
    """计费方式枚举"""
    MONTHLY_95 = '月95'
    PACKAGE_PORT = '包端口'
    AVERAGE_TRAFFIC = '平均流量'
    # 如果有其他计费方式,可以在这里添加
    OTHER = '其他'


class NodeCapacity:
    """
    表示网络中的一个节点及其带宽容量管理。
    """
    def __init__(self, node_id, node_name, operator, capacity, plan_line, bill_rate=1.0,
                 total_time_slots=8640, billing_type='', node_province='', scheduling_upper_limit_percentage=1.0):
        """
        初始化节点对象。

        :param node_id: 节点唯一标识符
        :param node_name: 节点名称
        :param operator: 运营商名称
        :param capacity: 节点最大容量(Gbps)
        :param plan_line: 节点初始规划线(Gbps)
        :param bill_rate: 节点价格系数
        :param total_time_slots: 总时间槽数(5分钟一个时间槽)
        :param billing_type: 计费方式,使用 BillingType 枚举
        :param node_province: 节点所在省份
        :param scheduling_upper_limit_percentage: 调度上限百分比,默认1.0(100%)
        """
        self.node_id = node_id
        self.node_name = node_name
        self.operator = operator
        self.capacity = capacity
        self.plan_line = plan_line
        self.bill_rate = bill_rate
        self.billing_type = billing_type
        self.node_province = node_province
        self.total_time_slots = total_time_slots
        self.free_minutes = self.calculate_free_minutes(self.total_time_slots)
        self.assigned_bandwidths = {}
        self.scheduling_upper_limit_percentage = scheduling_upper_limit_percentage
        # 增加属性,标识是否为价格为0的95计费节点
        self.is_free_95_node = (self.billing_type == BillingType.MONTHLY_95 and self.bill_rate == 0)

    def calculate_free_minutes(self, total_time_slots):
        """
        根据总时间槽数和免计费比例,计算节点的免费超规划线时间。

        :param total_time_slots: 总时间槽数
        :return: 免费超规划线时间(分钟)
        """
        if self.billing_type == BillingType.PACKAGE_PORT:
            # 包端口节点不计算免费分钟数
            return 0
        else:
            free_slots = int(total_time_slots * 0.05)  # 5%免计费时间
            free_minutes = free_slots * 5  # 每个时间槽为5分钟
            return free_minutes

    def adjust_free_minutes(self, minutes):
        """
        调整节点的免费超规划线时间。

        :param minutes: 调整的分钟数
        """
        self.free_minutes += minutes
        logging.debug(f"节点 {self.node_name} 调整免费时长 {minutes} 分钟,剩余免费时长:{self.free_minutes} 分钟。")

    def can_deal_brick(self, brick):
        """
        判断节点是否有能力处理指定的砖块。

        :param brick: 砖块信息,包含持续时间('duration')
        :return: 布尔值,表示是否可以处理
        """
        return self.free_minutes >= (brick['duration'] + HEAP_REDUNDANT_TIME)

    def get_available_capacity(self):
        """
        节点的可用能力,单位为 Gbps
        """
        adjusted_capacity = self.capacity * self.scheduling_upper_limit_percentage
        return adjusted_capacity - self.plan_line

    def assign_bandwidth(self, bandwidth, timestamp):
        """
        为节点分配带宽并记录时间

        :param bandwidth: 分配的带宽(Gbps)
        :param timestamp: 时间戳
        """
        if bandwidth <= 0:
            logging.debug(f"节点 {self.node_name}{timestamp} 分配带宽为0,跳过。")
            return

        if timestamp in self.assigned_bandwidths:
            self.assigned_bandwidths[timestamp] += bandwidth  # 若该时间点已有记录,则累加
        else:
            self.assigned_bandwidths[timestamp] = bandwidth  # 否则新建记录
        logging.debug(
            f"节点 {self.node_name}{timestamp} 分配了 {bandwidth:.2f} Gbps。"
        )

    def increase_plan_line(self, increment):
        """
        增加节点的规划线

        :param increment: 增加的规划线值(Gbps)
        :return: 是否成功增加
        """
        adjusted_capacity = self.capacity * self.scheduling_upper_limit_percentage
        new_plan_line = self.plan_line + increment
        if new_plan_line > adjusted_capacity:
            logging.warning(f"节点 {self.node_name} 的规划线已达到最大容量,无法再增加。")
            return False
        old_plan_line = self.plan_line
        self.plan_line = new_plan_line
        logging.info(f"节点 {self.node_name} 的规划线从 {old_plan_line} Gbps 增加至 {self.plan_line} Gbps。")
        # self.log_current_status()
        return True

    def log_current_status(self):
        """
        记录当前节点的关键信息,包括规划线和剩余免费时长。
        """
        logging.info(
            f"节点信息:{self.node_name} | 规划线:{self.plan_line} Gbps | "
            f"剩余免费时长:{self.free_minutes} 分钟 | 价格系数:{self.bill_rate} | 计费方式:{self.billing_type.value}"
        )

    def calculate_cost(self):
        """
        计算节点的付费成本。
        """
        if self.billing_type == BillingType.MONTHLY_95:
            # 对于95计费的节点:成本 = 节点的95值 * 价格系数
            assigned_values = list(self.assigned_bandwidths.values())
            if assigned_values:
                node_95 = np.percentile(assigned_values, 95)
                return node_95 * self.bill_rate
            else:
                return 0.0
        elif self.billing_type == BillingType.PACKAGE_PORT:
            # 对于包端口节点:成本 = 端口容量 * 价格系数
            return self.capacity * self.bill_rate
        elif self.billing_type == BillingType.AVERAGE_TRAFFIC:
            # 对于平均流量节点:成本 = 月带宽均值 * 价格系数
            assigned_values = list(self.assigned_bandwidths.values())
            if assigned_values:
                avg_bandwidth = np.mean(assigned_values)
                return avg_bandwidth * self.bill_rate
            else:
                return 0.0
        else:
            return 0.0


class BandwidthDemand:
    """
    表示一个带宽需求实例。
    """

    def __init__(self, operator, province, timestamp, demand_value):
        self.operator = operator
        self.province = province
        self.timestamp = timestamp
        self.demand_value = demand_value  # 单位:Gbps


class FileReader:
    """文件读取器"""

    @staticmethod
    def read_demand_data(file_path):
        """
        读取带宽需求数据

        :param file_path: 带宽需求数据文件的路径
        :return: 按运营商和省份分组的带宽需求字典
        """
        try:
            demand_df = pd.read_csv(file_path)
            demand_df['ts'] = pd.to_datetime(demand_df['ts'])
            bandwidth_demands = {}
            for _, row in demand_df.iterrows():
                operator = row['isp_cn']
                province = row['node_province']
                timestamp = row['ts']
                demand_value = float(row['switch_gbps'])  # 确保是浮点数
                key = (operator, province)
                if key not in bandwidth_demands:
                    bandwidth_demands[key] = []
                bandwidth_demands[key].append(BandwidthDemand(operator, province, timestamp, demand_value))
            logging.info(f"成功读取带宽需求数据,共 {sum(len(v) for v in bandwidth_demands.values())} 条记录。")
            return bandwidth_demands
        except Exception as e:
            logging.error(f"读取带宽需求数据失败:{e}")
            return {}


def prepare_bricks(total_plan_line, bandwidth_demands, time_slot=45):
    """
    根据总规划线和带宽需求,生成砖块

    :param total_plan_line: 总规划线(Gbps)
    :param bandwidth_demands: 带宽需求列表
    :param time_slot: 砖块的时间槽大小(分钟)
    :return: bricks 字典
    """
    if not bandwidth_demands:
        return {}

    # 将带宽需求转换为DataFrame,方便处理
    demand_df = pd.DataFrame([(d.timestamp, d.demand_value) for d in bandwidth_demands], columns=['ts', 'demand'])
    demand_df.sort_values('ts', inplace=True)

    bricks = {}
    brick_id = 0
    current_brick = None

    for _, row in demand_df.iterrows():
        timestamp = row['ts']
        demand = row['demand']
        excess = demand - total_plan_line
        if excess <= 0:
            continue  # 当前时间点不需要堆高

        if current_brick is None:
            # 开始新的砖块
            current_brick = {
                'start_time': timestamp,
                'end_time': timestamp,
                'duration': 5,  # 初始化为5分钟
                'top_height': excess,
                'id': brick_id
            }
            brick_id += 1
        else:
            # 检查是否可以合并到当前砖块
            duration = (timestamp - current_brick['start_time']).total_seconds() / 60 + 5  # 加上当前的5分钟
            if duration <= time_slot:
                # 合并到当前砖块
                current_brick['end_time'] = timestamp
                current_brick['duration'] = duration
                current_brick['top_height'] = max(current_brick['top_height'], excess)
            else:
                # 保存当前砖块,开始新的砖块
                bricks[current_brick['id']] = current_brick
                logging.debug(f"生成砖块 {current_brick['id']}: {current_brick}")
                current_brick = {
                    'start_time': timestamp,
                    'end_time': timestamp,
                    'duration': 5,
                    'top_height': excess,
                    'id': brick_id
                }
                brick_id += 1

    # 保存最后一个砖块
    if current_brick is not None:
        bricks[current_brick['id']] = current_brick
        logging.debug(f"生成砖块 {current_brick['id']}: {current_brick}")

    logging.info(f"生成了 {len(bricks)} 个砖块。")
    return bricks


def calculate_sols(sol, only_one=True):
    """
    递归计算堆高组合

    :param sol: 当前解决方案的字典结构
    :param only_one: 是否只寻找一个组合
    :return: 布尔值,表示是否找到组合
    """
    result = False
    capacities = sol['can_use_capacities']

    # 创建节点ID到节点对象的映射
    nodeid2capacity = {capacity.node_id: capacity for capacity in capacities}

    # 如果没有砖块需要处理,说明所有需求已被满足
    if len(sol['bricks']) <= 0:
        return True
    else:
        # 选择需求最高的砖块(即top_height最大的砖块)
        bricks_sorted = sorted([brick for brick in sol['bricks'].values()], key=lambda x: -x['top_height'])
        current_brick = bricks_sorted[0]

        # 获取能够处理当前砖块的节点组合
        eligible_nodes = [node for node in capacities if node.can_deal_brick(current_brick)]
        node_combinations = get_nodes_combination(current_brick, eligible_nodes, only_one=True)

        logging.debug("为砖块 %s 计算出的节点组合:\n%s", current_brick['id'],
                      json.dumps(node_combinations, indent=4, ensure_ascii=False))

        # 如果没有合适的节点组合,返回失败
        if not node_combinations:
            sol['children'] = False
            return False

        # 遍历所有可能的节点组合
        for node_combination in node_combinations:
            # 调整节点的免费分钟数,减少当前砖块的持续时间
            for node_id in node_combination:
                capacity = nodeid2capacity.get(node_id)
                if capacity:
                    capacity.adjust_free_minutes(-current_brick['duration'] - HEAP_REDUNDANT_TIME)

            # 移除当前砖块
            updated_bricks = copy.deepcopy(sol['bricks'])
            del updated_bricks[current_brick['id']]

            # 创建子解决方案
            child_sol = {
                'parent': sol,
                'children': [],
                'can_use_capacities': capacities,
                'nodes': node_combination,
                'brick': current_brick,
                'bricks': updated_bricks
            }
            sol['children'].append(child_sol)

            # 递归调用
            s_result = calculate_sols(child_sol, only_one)
            result = result or s_result

            # 回退节点的免费分钟数
            for node_id in node_combination:
                capacity = nodeid2capacity.get(node_id)
                if capacity:
                    capacity.adjust_free_minutes(current_brick['duration'] + HEAP_REDUNDANT_TIME)

            # 如果只寻找一个组合,找到后立即退出
            if s_result and only_one:
                break

    return result


def has_sols(bricks, capacities):
    """
    对砖块和节点,计算最合适的堆高组合,返回(组合是否满足所有砖块的堆高,具体组合)

    :param bricks: 砖块字典
    :param capacities: 节点容量列表
    :return: (是否存在堆高组合, 组合方案字典)
    """
    solution = {
        'parent': None,
        'children': [],
        'nodes': None,
        'can_use_capacities': capacities,
        'brick': None,
        'bricks': bricks
    }
    result = calculate_sols(solution)
    return result, solution


def get_ignore_node_ids(current_node: dict) -> list:
    """
    获取需要忽略的节点ID列表,避免在兄弟节点中重复使用。

    :param current_node: 当前节点的字典结构
    :return: 忽略的节点ID列表
    """
    ignore_node_ids = []
    parent = current_node.get('parent')
    while parent is not None:
        for child in parent.get('children', []):
            ignore_node_ids.append(child['node_id'])
        parent = parent.get('parent')
    return ignore_node_ids


def calculate_nodes_combination(current_node: dict, ignore_node_ids: list = None, only_one: bool = True) -> bool:
    """
    递归计算某个堆高高度下是否存在节点组合能满足需求。

    :param current_node: 当前状态字典
    :param ignore_node_ids: 忽略的节点ID列表
    :param only_one: 是否只寻找一个组合
    :return: 布尔值,表示是否找到组合
    """
    if ignore_node_ids is None:
        ignore_node_ids = get_ignore_node_ids(current_node)

    logging.debug(f"当前组合计算: val={current_node['val']}, 忽略节点={ignore_node_ids}")
    success = False

    # 检查是否已经满足需求
    if current_node['val'] <= 0:
        logging.debug("目标高度已满足。")
        return True

    # 检查节点总能力是否足以满足需求
    total_capacity = sum(node.get_available_capacity() for node in current_node['capacities'])
    if current_node['val'] > total_capacity:
        logging.debug("节点总能力不足以满足需求。")
        return False

    # 选择需求最高的节点组合(这里简化为按可用容量排序)
    sorted_nodes = sorted(
        current_node['capacities'],
        key=lambda n: n.get_available_capacity(),
        reverse=True
    )

    for node in sorted_nodes:
        if node.node_id in ignore_node_ids:
            continue
        ignore_node_ids.append(node.node_id)

        remaining_val = current_node['val'] - node.get_available_capacity()
        child_val = max(remaining_val, 0)
        child_node = {
            'children': [],
            'parent': current_node,
            'node_id': node.node_id,
            'val': child_val,
            'capacities': current_node['capacities']
        }

        logging.debug(
            f"尝试节点 {node.node_id} ({node.node_name}), 当前val={current_node['val']} -> "
            f"child_val={child_val}, 节点可用容量={node.get_available_capacity()} Gbps, "
            f"累加后剩余需求={child_val}"
        )

        current_node['children'].append(child_node)

        # 递归调用
        result = calculate_nodes_combination(child_node, copy.deepcopy(ignore_node_ids), only_one)
        success = success or result

        if not result:
            ignore_node_ids.pop()

        if result and only_one:
            break

    if not success:
        logging.debug("组合失败,未找到合适的节点组合。")
    return success


def get_nodes_combination(brick: dict, eligible_nodes: list, only_one: bool = True) -> list:
    """
    获取满足砖块需求的节点组合。

    :param brick: 当前砖块信息
    :param eligible_nodes: 可用节点列表
    :param only_one: 是否只寻找一个组合
    :return: 节点组合列表
    """
    logging.debug(f"开始计算砖块 {brick['id']} 的节点组合,目标高度={brick['top_height']}")
    h = {
        'children': [],
        'parent': None,
        'node_id': None,
        'val': brick['top_height'],
        'capacities': eligible_nodes
    }

    if not eligible_nodes:
        logging.debug("警告:传入的节点容量列表为空,无法计算组合。")
        return []

    logging.debug(f"初始可用节点容量数:{len(eligible_nodes)}")
    for node in eligible_nodes:
        logging.debug(f"节点 {node.node_id} ({node.node_name}) - 可用容量:{node.get_available_capacity()} Gbps")

    calculate_nodes_combination(h, [], only_one)
    combination_result = get_combination(h)
    logging.debug(f"组合结果为: {combination_result}")
    return combination_result


def get_combination(h):
    """
    遍历组合树,返回所有有效的节点组合

    :param h: 当前节点的字典结构
    :return: 节点组合列表
    """
    combination = []
    if h['val'] > 0 and len(h['children']) == 0:
        return []
    if len(h['children']) > 0:
        for child in h['children']:
            child_combinations = get_combination(child)
            if len(child_combinations) == 0:
                continue
            for child_combination in child_combinations:
                if h['node_id'] is not None:
                    child_combination.append(h['node_id'])
                    combination.append(child_combination)
                else:
                    combination.append(child_combination)
    else:
        if h['node_id'] is not None:
            combination = [[h['node_id']]]
    return combination


def extract_allocation_plan(sol):
    """
    从堆高组合中提取每个砖块的分配方案

    :param sol: 堆高组合方案
    :return: allocation_plan 列表,包含每个砖块的分配信息
    """
    allocation_plan = []

    def traverse(sol):
        if sol['brick'] is not None:
            brick = sol['brick']
            nodes = sol['nodes']
            allocation_plan.append({
                'start_time': brick['start_time'],
                'end_time': brick['end_time'],
                'duration': brick['duration'],
                'top_height': brick['top_height'],
                'nodes': nodes
            })
            logging.info(
                f"砖块 {brick['id']} 分配: 时间范围 {brick['start_time']} - {brick['end_time']} "
                f"持续时间 {brick['duration']} 分钟,超出规划线 {brick['top_height']:.2f} Gbps,"
                f"分配节点: {', '.join(nodes)}"
            )

            # 记录分配细节
            for node_id in nodes:
                node = next((n for n in sol['can_use_capacities'] if n.node_id == node_id), None)
                if node:
                    allocated = node.assigned_bandwidths.get(brick['start_time'], 0)
                    logging.debug(f"节点 {node.node_name}{brick['start_time']} 分配了 {allocated} Gbps。")

        for child in sol.get('children', []):
            traverse(child)

    traverse(sol)
    return allocation_plan


def allocate_initial_bandwidth(timestamp, demand_value, nodes):
    """
    分配初始带宽,包括包端口节点和价格为0的95计费节点。
    返回已分配的带宽总和。
    """
    logging.debug(f"开始初始分配时间点 {timestamp} 的需求 {demand_value} Gbps。")

    # 包端口节点
    package_nodes = [node for node in nodes if node.billing_type == BillingType.PACKAGE_PORT]

    # 价格为0的95计费节点
    free_95_nodes = [node for node in nodes if node.is_free_95_node]

    # 合并包端口节点和免费95计费节点
    initial_nodes = package_nodes + free_95_nodes

    total_allocated = 0
    remaining_demand = demand_value

    # 分配到初始节点(包端口节点和免费95计费节点)
    if initial_nodes and remaining_demand > 0:
        for node in initial_nodes:
            allocation = min(node.capacity * node.scheduling_upper_limit_percentage * 0.88, remaining_demand)
            node.assign_bandwidth(allocation, timestamp)
            total_allocated += allocation
            remaining_demand -= allocation
            if remaining_demand <= 0:
                break

    return total_allocated  # 返回已分配的带宽


def allocate_average_traffic_bandwidth(timestamp, demand_value, nodes):
    """
    分配平均流量带宽,仅在18:00到22:00之间分配到平均流量节点。
    返回已分配的带宽总和。
    """
    logging.debug(f"开始分配平均流量节点时间点 {timestamp} 的需求 {demand_value} Gbps。")

    # 检查时间是否在18:00到22:00之间
    if not (18 <= timestamp.hour < 22):
        logging.debug(f"时间点 {timestamp} 不在 18:00 到 22:00 之间,跳过平均流量节点分配。")
        return 0

    # 平均流量节点
    average_nodes = [node for node in nodes if node.billing_type == BillingType.AVERAGE_TRAFFIC]

    total_allocated = 0
    remaining_demand = demand_value

    # 分配到平均流量节点
    if average_nodes and remaining_demand > 0:
        for node in average_nodes:
            allocation = min(node.capacity * node.scheduling_upper_limit_percentage * 0.88, remaining_demand)
            node.assign_bandwidth(allocation, timestamp)
            total_allocated += allocation
            remaining_demand -= allocation
            if remaining_demand <= 0:
                break

    return total_allocated  # 返回已分配的带宽


def allocate_remaining_bandwidth(timestamp, demand_value, nodes, total_plan_line, allocation_plan):
    """
    分配剩余的带宽需求,主要针对非免费月95计费节点。
    """
    logging.debug(f"开始分配剩余需求时间点 {timestamp} 的需求 {demand_value} Gbps。")

    # 月95计费节点(排除价格为0的节点)
    billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95 and not node.is_free_95_node]

    remaining_demand = demand_value

    # 重新计算当前的总规划线(自动提线后节点的 plan_line 可能已变化,以此处的最新值为准)
    total_plan_line = sum(node.plan_line for node in billing_nodes)

    if remaining_demand <= total_plan_line:
        # 需求未超出总规划线,平均分配到月95计费节点
        total_allocated = 0
        allocations = []
        num_nodes = len(billing_nodes)
        for i, node in enumerate(billing_nodes):
            if i < num_nodes - 1:
                allocation = remaining_demand * (node.plan_line / total_plan_line)
                allocation = round(allocation, 10)  # 限制浮点数精度
                node.assign_bandwidth(allocation, timestamp)
                total_allocated += allocation
                allocations.append(allocation)
                logging.debug(f"节点 {node.node_name}{timestamp} 分配了 {allocation:.10f} Gbps(平均分配)。")
            else:
                # 最后一个节点,调整分配量以确保总和精确等于剩余需求
                allocation = remaining_demand - total_allocated
                node.assign_bandwidth(allocation, timestamp)
                allocations.append(allocation)
                logging.debug(f"节点 {node.node_name}{timestamp} 分配了 {allocation:.10f} Gbps(平均分配,调整后)。")
    else:
        # 需求超出总规划线,先按照规划线分配
        total_allocated = 0
        for node in billing_nodes:
            plan_line = node.plan_line
            if remaining_demand > 0:
                allocation = min(remaining_demand, plan_line)
                node.assign_bandwidth(allocation, timestamp)
                total_allocated += allocation
                remaining_demand -= allocation
        logging.debug(f"月95计费节点在 {timestamp} 分配了 {total_allocated:.2f} Gbps(规划线部分)。")

        # 如果还有剩余需求,按照堆高方案分配
        if remaining_demand > 0 and allocation_plan:
            allocated = False
            for plan in allocation_plan:
                if plan['start_time'] <= timestamp <= plan['end_time']:
                    # 获取该方案中的节点
                    nodes_in_plan = [node for node in nodes if node.node_id in plan['nodes']]
                    total_available_capacity = sum(node.capacity * node.scheduling_upper_limit_percentage - node.assigned_bandwidths.get(timestamp, 0) for node in nodes_in_plan)
                    if total_available_capacity <= 0:
                        continue
                    for node in nodes_in_plan:
                        node_available_capacity = node.capacity * node.scheduling_upper_limit_percentage - node.assigned_bandwidths.get(timestamp, 0)
                        node_excess_allocation = node_available_capacity / total_available_capacity * remaining_demand
                        node_excess_allocation = min(node_excess_allocation, node_available_capacity)
                        node.assign_bandwidth(node_excess_allocation, timestamp)
                    allocated = True
                    logging.info(f"时间点 {timestamp} 的剩余需求 {remaining_demand:.2f} Gbps 已分配到堆高方案节点。")
                    remaining_demand = 0
                    break
            if not allocated:
                logging.warning(f"时间点 {timestamp} 的需求 {remaining_demand:.2f} Gbps 超出可分配范围,未分配。")

        # 如果还有剩余需求,无法分配
        if remaining_demand > 0:
            logging.warning(f"时间点 {timestamp} 的需求 {remaining_demand:.2f} Gbps 无法分配,超出所有节点容量。")


def save_to_csv(nodes, timestamps, operator, province, total_demand_dict, filename='bandwidth_allocation_summary.csv'):
    """
    将带宽分配详情按时间节点写入CSV文件

    :param nodes: 所有节点对象列表
    :param timestamps: 所有时间点列表
    :param operator: 运营商名称
    :param province: 省份名称
    :param total_demand_dict: 每个时间点的总带宽需求
    :param filename: CSV文件名
    """
    try:
        with open(filename, 'w', newline='') as csvfile:
            # 定义CSV的列,包含运营商、省份、时间戳、总带宽需求和每个节点的带宽分配情况
            fieldnames = ['Operator', 'Province', 'Timestamp', 'Total_Demand'] + [node.node_name for node in nodes]
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

            # 对每个时间点记录分配情况
            for timestamp in timestamps:
                row = {
                    'Operator': operator,
                    'Province': province,
                    'Timestamp': timestamp.strftime('%Y-%m-%d %H:%M:%S'),
                    'Total_Demand': f"{total_demand_dict.get(timestamp, 0):.2f}"
                }
                for node in nodes:
                    # 从 assigned_bandwidths 中获取该时间点的分配带宽,若无则为0
                    bandwidth = node.assigned_bandwidths.get(timestamp, 0)
                    row[node.node_name] = f"{bandwidth:.2f}"  # 格式化为两位小数
                writer.writerow(row)
        logging.info(f"带宽分配明细已保存到 {filename}")
    except Exception as e:
        logging.error(f"保存带宽分配结果到CSV失败:{e}")


def automatic_plan_line_adjustment(nodes, bandwidth_demands, step_size=STEP_SIZE):
    """
    自动调整月95计费节点的规划线,直到找到可行的堆高方案或无法继续调整。
    """
    adjustment_attempts = 0

    # 若剩余带宽需求为空,直接返回空方案,避免后续对空序列调用 max() 报错
    if not bandwidth_demands:
        logging.info("剩余带宽需求为空,无需进行提线调整。")
        return []

    # 仅对非免费月95计费的节点进行提线
    billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95 and not node.is_free_95_node]

    if not billing_nodes:
        logging.warning("没有可调整的月95计费节点,无法进行提线调整。")
        return []

    # 按照价格系数从小到大排序
    nodes_sorted = sorted(billing_nodes, key=lambda x: x.bill_rate)
    num_nodes = len(nodes_sorted)
    current_node_index = 0  # 当前提线的节点索引

    while adjustment_attempts < MAX_ADJUSTMENTS:
        total_plan_line = sum(node.plan_line for node in billing_nodes)
        total_capacity = sum(node.capacity * node.scheduling_upper_limit_percentage for node in billing_nodes)
        max_demand = max(demand.demand_value for demand in bandwidth_demands)  # 使用最大需求
        logging.info(f"尝试总规划线 {total_plan_line} Gbps,节点总容量 {total_capacity} Gbps,最大需求 {max_demand} Gbps。")

        if total_capacity < max_demand:
            logging.error("节点总容量不足以满足最大带宽需求。")
            break

        # 判断是否需要生成砖块
        if max_demand > total_plan_line:
            # 需求超出总规划线,需要生成砖块
            bricks = prepare_bricks(total_plan_line, bandwidth_demands)
            if not bricks:
                logging.warning("没有生成任何砖块,所有带宽需求均未超过总规划线。")
                return []
            # 计算堆高组合
            success, sol = has_sols(bricks, billing_nodes)
            if success:
                logging.info("找到可行的堆高组合。")
                allocation_plan = extract_allocation_plan(sol)
                return allocation_plan
            else:
                logging.info("未找到可行的堆高组合。")
                allocation_plan = []
        else:
            # 需求未超出总规划线,无需生成砖块
            logging.info("需求未超出总规划线,无需堆高。")
            return []

        # 找不到方案,开始提线
        # 提线顺序按价格系数从小到大排序,循环提线
        adjustment_made = False
        attempts = 0  # 防止无限循环

        while attempts < num_nodes:
            node = nodes_sorted[current_node_index]
            adjusted_capacity = node.capacity * node.scheduling_upper_limit_percentage
            if node.plan_line < adjusted_capacity:
                logging.info(f"尝试为节点 {node.node_name} 提线 {step_size} Gbps。")
                success_adjust = node.increase_plan_line(step_size)
                if success_adjust:
                    adjustment_attempts += 1
                    adjustment_made = True
                    logging.info(f"已为节点 {node.node_name} 提线至 {node.plan_line} Gbps。")
                    current_node_index = (current_node_index + 1) % num_nodes
                    break  # 提线一个节点后退出内层循环
                else:
                    logging.info(f"节点 {node.node_name} 已无法提线,跳过。")
            else:
                logging.info(f"节点 {node.node_name} 已达到最大容量,跳过。")

            # 移动到下一个节点
            current_node_index = (current_node_index + 1) % num_nodes
            attempts += 1

        if not adjustment_made:
            logging.warning("所有节点的规划线已达到最大容量,无法再提线。")
            break

    logging.error("未能找到可行的带宽分配方案,即使在调整规划线后。")
    return None


def calculate_reuse_rate(original_bandwidth_demands, assigned_bandwidths, nodes):
    """
    计算复用率。

    复用率计算 = 1 - (对应地区节点95值累加 / 总带宽95值)

    :param original_bandwidth_demands: 原始带宽需求列表
    :param assigned_bandwidths: 节点分配的带宽字典(时间戳: 总分配带宽)
    :param nodes: 节点列表(包括所有95计费节点)
    :return: (复用率, 总节点95值, 总需求95值)
    """
    if not original_bandwidth_demands or not nodes:
        return None

    # 计算总需求的95th percentile
    demand_values = [demand.demand_value for demand in original_bandwidth_demands]
    demand_95 = np.percentile(demand_values, 95)

    # 计算所有95计费节点的带宽分配的95th percentile
    billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95]

    node_95_values = []
    for node in billing_nodes:
        assigned_values = list(node.assigned_bandwidths.values())
        if assigned_values:
            node_95 = np.percentile(assigned_values, 95)
            node_95_values.append(node_95)
    total_node_95 = sum(node_95_values)

    if demand_95 == 0:
        return None

    reuse_rate = 1 - (total_node_95 / demand_95)
    return reuse_rate, total_node_95, demand_95  # 返回复用率和总节点95值、总需求95值


def process_operator_province(operator, province, demands, province_limits,
                              output_dir='output', reduction_percentage=0.0,
                              adjust_limits=True, enable_automatic_plan_line_adjustment=True):
    """
    处理单个运营商和省份的带宽分配和优化。

    :param operator: 运营商名称
    :param province: 省份名称
    :param demands: 带宽需求列表
    :param province_limits: 省份限制集合
    :param output_dir: 输出目录
    :param reduction_percentage: 当前的免费时长减少比例
    :param adjust_limits: 是否进行免费时长调减和调度上限调整,默认为True
    :param enable_automatic_plan_line_adjustment: 是否启用自动提线,默认为True
    :return: (operator, province, sum_upper_limit_bandwidth, sum_guaranteed_bandwidth, total_node_95, demand_95, reuse_rate, total_cost)
    """
    logging.info(f"开始处理运营商 {operator},省份 {province}。")

    # 保存原始的总带宽需求,用于计算固定的Total_Bandwidth_95th_Percentile
    original_demands = demands.copy()

    # 检查省份限制
    if (operator, province) in province_limits:
        # 只允许在本省节点上分配
        logging.info(f"根据省份限制,运营商 {operator},省份 {province} 只使用本省节点。")
        # 只处理本省的带宽需求
        demands = [demand for demand in demands if demand.province == province]
        logging.info(f"处理 {operator} {province} 的本省带宽需求,共 {len(demands)} 条记录。")
    else:
        # 无省份限制,将同一运营商的所有省份带宽需求聚合
        demands = aggregate_demands_by_operator(operator, demands)
        original_demands = demands.copy()  # 更新原始需求
        logging.info(f"处理运营商 {operator} 的聚合带宽需求,共 {len(demands)} 条记录。")
        province = '无限制'

    # **自动调整节点数量和容量**
    # 初始化节点列表
    nodes = initialize_nodes(operator, province, demands)
    logging.info(f"初始化节点完成,共 {len(nodes)} 个节点。")

    if not nodes:
        logging.warning(f"运营商 {operator},省份 {province} 没有可用节点,跳过。")
        return operator, province, 0, 0, 0, 0, None, 0.0

    # **首先,分配带宽给包端口节点和免费95计费节点**
    # 获取所有时间点
    timestamps = sorted({demand.timestamp for demand in demands})

    # 创建一个字典,用于记录每个时间点的剩余需求和总需求
    remaining_demands = {}
    total_demand_dict = {}  # 每个时间点的总需求

    for demand in demands:
        timestamp = demand.timestamp
        demand_value = demand.demand_value
        total_demand_dict[timestamp] = demand_value  # 记录总需求

        # 分配初始带宽(包端口节点和免费95计费节点)
        allocated_bandwidth = allocate_initial_bandwidth(timestamp, demand_value, nodes)

        # 计算剩余需求
        remaining_demand_value = demand_value - allocated_bandwidth
        remaining_demands[timestamp] = remaining_demand_value

    # **然后,分配带宽给平均流量节点(仅在18:00到22:00之间)**
    for demand in demands:
        timestamp = demand.timestamp
        remaining_demand_value = remaining_demands[timestamp]

        # 分配带宽到平均流量节点
        allocated_bandwidth = allocate_average_traffic_bandwidth(timestamp, remaining_demand_value, nodes)

        # 更新剩余需求
        remaining_demands[timestamp] -= allocated_bandwidth

    # **然后,使用剩余的需求进行堆高方案计算和自动提线**

    # 过滤出非免费月95计费的节点
    billing_nodes = [node for node in nodes if node.billing_type == BillingType.MONTHLY_95 and not node.is_free_95_node]

    if not billing_nodes:
        logging.warning(f"运营商 {operator},省份 {province} 没有可调整的月95计费节点,跳过。")
        return operator, province, 0, 0, 0, 0, None, 0.0

    # 计算总规划线
    total_plan_line = sum(node.plan_line for node in billing_nodes)
    logging.info(f"运营商 {operator},省份 {province} 的可调整月95计费节点总规划线为 {total_plan_line} Gbps。")

    # 准备剩余的带宽需求列表
    remaining_bandwidth_demands = [
        BandwidthDemand(operator, province, timestamp, remaining_demands[timestamp])
        for timestamp in timestamps if remaining_demands[timestamp] > 0
    ]

    if enable_automatic_plan_line_adjustment:
        # 进行带宽分配与自动提线
        allocation_plan = automatic_plan_line_adjustment(
            billing_nodes,
            remaining_bandwidth_demands,
            step_size=STEP_SIZE
        )

        if allocation_plan is None:
            logging.error(f"运营商 {operator},省份 {province} 的带宽分配失败,未找到可行的方案。")
            return operator, province, 0, 0, 0, 0, None, 0.0
    else:
        # 不进行自动提线,直接尝试现有的规划线进行堆高方案计算
        total_plan_line = sum(node.plan_line for node in billing_nodes)
        logging.info(f"使用当前的总规划线 {total_plan_line} Gbps 进行堆高方案计算。")
        # 生成砖块
        bricks = prepare_bricks(total_plan_line, remaining_bandwidth_demands)
        if bricks:
            success, sol = has_sols(bricks, billing_nodes)
            if success:
                logging.info("找到可行的堆高组合。")
                allocation_plan = extract_allocation_plan(sol)
            else:
                logging.error("未能找到可行的堆高方案,带宽分配失败。")
                return operator, province, 0, 0, 0, 0, None, 0.0
        else:
            logging.info("需求未超出总规划线,无需堆高。")
            allocation_plan = []

    logging.info(f"运营商 {operator},省份 {province} 的带宽分配与{'自动提线' if enable_automatic_plan_line_adjustment else '堆高方案计算'}完成。")
    # 分配剩余的带宽需求
    for demand in remaining_bandwidth_demands:
        allocate_remaining_bandwidth(
            demand.timestamp,
            demand.demand_value,
            nodes,
            total_plan_line,
            allocation_plan
        )

    # 保存分配结果
    # 修改输出文件名,包含减少的免费时长比例、运营商和省份
    reduction_str = f"{int(reduction_percentage * 100)}%"
    if province == '':
        province = '全部省份'

    # 创建对应的子目录
    sub_dir = os.path.join(output_dir, f"减免{reduction_str}_")
    os.makedirs(sub_dir, exist_ok=True)  # exist_ok=True 避免多进程并发创建同一目录时报错

    output_filename = os.path.join(sub_dir, f"{operator}_{province}.csv")
    save_to_csv(nodes, timestamps, operator, province, total_demand_dict, filename=output_filename)

    # 计算复用率,考虑所有月95计费节点(包括免费节点)
    # 我们需要在计算复用率时使用原始的总需求,而不是剩余需求
    # 首先,获取所有时间点的总需求
    total_demand_values = [total_demand_dict[timestamp] for timestamp in timestamps]

    # 计算所有节点在每个时间点的总分配带宽
    assigned_bandwidths = {}
    for timestamp in timestamps:
        assigned_bandwidths[timestamp] = sum(node.assigned_bandwidths.get(timestamp, 0) for node in nodes if node.billing_type == BillingType.MONTHLY_95)

    # 计算复用率
    reuse_rate_result = calculate_reuse_rate(
        original_demands,  # 使用原始的总需求
        assigned_bandwidths,  # 节点的分配带宽总和
        nodes  # 包含所有95计费节点
    )

    if reuse_rate_result is not None:
        reuse_rate, total_node_95, demand_95 = reuse_rate_result
        logging.info(f"运营商 {operator},省份 {province} 的复用率为 {reuse_rate:.4f}。")
    else:
        reuse_rate = None
        total_node_95 = 0
        demand_95 = 0
        logging.info(f"运营商 {operator},省份 {province} 的复用率无法计算。")

    # 计算上限带宽累加和保底带宽累加
    sum_upper_limit_bandwidth = sum(node.capacity * node.scheduling_upper_limit_percentage for node in nodes if node.billing_type == BillingType.MONTHLY_95)
    sum_guaranteed_bandwidth = sum(node.plan_line for node in nodes if node.billing_type == BillingType.MONTHLY_95)

    # 计算总成本
    total_cost = 0.0
    for node in nodes:
        node_cost = node.calculate_cost()
        total_cost += node_cost

    logging.info(f"运营商 {operator},省份 {province} 的总付费成本为 {total_cost:.2f}。")

    return (operator, province, sum_upper_limit_bandwidth, sum_guaranteed_bandwidth, total_node_95, demand_95, reuse_rate, total_cost)


def aggregate_demands_by_operator(operator, demands):
    """
    将同一运营商的所有省份带宽需求在每个时间点聚合

    :param operator: 运营商名称
    :param demands: 带宽需求列表
    :return: 聚合后的带宽需求列表
    """
    demand_df = pd.DataFrame([(d.timestamp, d.demand_value) for d in demands], columns=['timestamp', 'demand_value'])
    aggregated_df = demand_df.groupby('timestamp').sum().reset_index()
    aggregated_demands = [BandwidthDemand(operator, '', row['timestamp'], row['demand_value']) for _, row in aggregated_df.iterrows()]
    return aggregated_demands


def process_all(operator_province_pairs, bandwidth_demands_dict, province_limits,
                output_dir='output', reduction_percentage=0.0, adjust_limits=True, enable_automatic_plan_line_adjustment=True):
    """
    并发处理所有运营商和省份的带宽分配任务。

    :param operator_province_pairs: 运营商和省份的元组列表
    :param bandwidth_demands_dict: 按运营商和省份分组的带宽需求字典
    :param province_limits: 省份限制集合
    :param output_dir: 输出目录
    :param reduction_percentage: 当前的免费时长减少比例
    :param adjust_limits: 是否进行免费时长调减和调度上限调整
    :param enable_automatic_plan_line_adjustment: 是否启用自动提线
    :return: 结果列表
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    results = []
    with ProcessPoolExecutor() as executor:
        futures = []
        # 首先处理有省份限制的运营商和省份
        for (operator, province) in operator_province_pairs:
            if (operator, province) in province_limits:
                demands = bandwidth_demands_dict.get((operator, province), [])
                if not demands:
                    logging.warning(f"运营商 {operator},省份 {province} 没有带宽需求,跳过。")
                    continue
                futures.append(
                    executor.submit(
                        process_operator_province,
                        operator,
                        province,
                        demands,
                        province_limits,
                        output_dir,
                        reduction_percentage,  # 传递减少比例
                        adjust_limits,  # 传递参数
                        enable_automatic_plan_line_adjustment  # 传递参数
                    )
                )

        # 然后处理无限制的运营商
        operators = set([op for op, _ in operator_province_pairs])
        for operator in operators:
            # 获取该运营商的无限制省份
            unlimited_provinces = [province for op, province in operator_province_pairs if
                                   op == operator and (op, province) not in province_limits]
            if not unlimited_provinces:
                continue  # 如果该运营商没有无限制的省份,跳过
            # 聚合该运营商的无限制省份的带宽需求
            demands = []
            for province in unlimited_provinces:
                demands.extend(bandwidth_demands_dict.get((operator, province), []))
            if not demands:
                logging.warning(f"运营商 {operator} 没有无限制省份的带宽需求,跳过。")
                continue
            # 调用 process_operator_province,province 参数设为 '无限制'
            futures.append(
                executor.submit(
                    process_operator_province,
                    operator,
                    '无限制',
                    demands,
                    province_limits,
                    output_dir,
                    reduction_percentage,  # 传递减少比例
                    adjust_limits,  # 传递参数
                    enable_automatic_plan_line_adjustment  # 传递参数
                )
            )

        for future in as_completed(futures):
            result = future.result()
            results.append(result)

    return results


def initialize_nodes(operator, province, demands):
    """
    根据业务需求自动初始化节点列表,调整节点数量和容量。

    :param operator: 运营商名称
    :param province: 省份名称
    :param demands: 带宽需求列表
    :return: 初始化的节点列表
    """
    # 获取需求的最大值
    max_demand = max(demand.demand_value for demand in demands)

    # 初始估计需要的总容量,可以稍微增加一些裕度
    total_capacity_needed = max_demand * 1.1  # 增加10%的裕度

    # 单个节点的最大容量
    max_node_capacity = MAX_NODE_CAPACITY  # 400G

    # 计算需要的节点数量
    num_nodes = int(np.ceil(total_capacity_needed / max_node_capacity))

    # 计算每个节点的容量
    node_capacity = total_capacity_needed / num_nodes

    # 确保单个节点的容量不超过最大容量
    if node_capacity > max_node_capacity:
        node_capacity = max_node_capacity

    # 初始化节点列表
    nodes = []
    for i in range(num_nodes):
        node_id = f"{operator}_{province}_node_{i+1}"
        node_name = f"{operator}_{province}_node_{i+1}"
        capacity = node_capacity
        plan_line = capacity * 0.3  # 保底设置为容量的30%
        node = NodeCapacity(
            node_id=node_id,
            node_name=node_name,
            operator=operator,
            capacity=capacity,
            plan_line=plan_line,
            bill_rate=1.0,
            billing_type=BillingType.MONTHLY_95,
            node_province=province
        )
        nodes.append(node)

    logging.info(f"根据需求初始化节点,共 {num_nodes} 个,每个节点容量 {node_capacity:.2f} Gbps。")
    return nodes


def main():
    """
    主程序入口。
    """
    logging.info("带宽分配系统启动。")

    # 命令行参数解析
    parser = argparse.ArgumentParser(description='带宽分配系统')
    parser.add_argument('--operator', type=str, help='指定运营商名称')
    parser.add_argument('--province', type=str, help='指定省份名称')
    parser.add_argument('--adjust-method', type=str, choices=['reduce_free_minutes', 'reduce_scheduling_upper_limit', 'none'],
                        default='reduce_free_minutes', help='选择调整方式:reduce_free_minutes、reduce_scheduling_upper_limit 或 none')
    parser.add_argument('--disable-auto-plan-line', action='store_true', help='禁用自动提线功能')
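    # Example invocations (script name and values are placeholders):
    #   python bandwidth_allocation.py                                  # process every operator/province pair
    #   python bandwidth_allocation.py --operator 电信 --province 广东    # a single pair
    #   python bandwidth_allocation.py --adjust-method none --disable-auto-plan-line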
    args = parser.parse_args()

    specified_operator = args.operator
    specified_province = args.province
    adjust_method = args.adjust_method
    disable_auto_plan_line = args.disable_auto_plan_line

    if (specified_operator and not specified_province) or (not specified_operator and specified_province):
        parser.error("必须同时指定运营商和省份,或者都不指定。")

    # 读取带宽需求数据
    bd_data_path = '../data/work_data_mult.csv'  # 请确保路径正确
    bandwidth_demands_dict = FileReader.read_demand_data(bd_data_path)

    if not bandwidth_demands_dict:
        logging.error("没有带宽需求数据,程序终止。")
        return

    # 读取省份限制数据
    province_limits_file = '../data/province_limit.csv'  # 请确保路径正确
    province_limits = set()  # 本示例中不考虑省份限制

    # 获取所有运营商和省份的组合
    operator_province_pairs = list(bandwidth_demands_dict.keys())
    logging.info(f"总共有 {len(operator_province_pairs)} 个运营商-省份组合需要处理。")

    # 如果指定了运营商和省份,只处理指定的组合
    if specified_operator and specified_province:
        operator_province_pairs = [(specified_operator, specified_province)]
        logging.info(f"只处理指定的运营商和省份:{specified_operator} {specified_province}")
    else:
        logging.info("处理所有运营商和省份的组合。")

    # 定义减少比例列表:例如调度上限调整时为 [0, 0.05, 0.10, 0.15, 0.20],对应调度上限从100%降到80%
    if adjust_method == 'reduce_scheduling_upper_limit':
        reduction_percentages = [i * 0.05 for i in range(5)]  # 从0%到20%,每次减少5%
    elif adjust_method == 'reduce_free_minutes':
        reduction_percentages = [i * 0.1 for i in range(10)]  # 从0%到90%,每次减少10%
    else:
        reduction_percentages = [0.0]  # 不进行任何调整

    # 准备记录结果的列表
    total_free_minutes_list = []
    reuse_rate_list = []

    # 创建输出目录
    output_dir = 'output'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # 打开CSV文件,准备写入结果
    csv_filename = os.path.join(output_dir, 'free_minutes_vs_reuse_rate.csv')
    with open(csv_filename, 'w', newline='') as csvfile:
        # 修改字段名,增加 'Total_Node_95th_Percentile' 和 'Total_Cost'
        fieldnames = ['Reduction_Percentage', 'Total_Free_Minutes', 'Sum_Upper_Limit_Bandwidth',
                      'Sum_Guaranteed_Bandwidth', 'Total_Bandwidth_95th_Percentile',
                      'Total_Node_95th_Percentile', 'Optimal_Reuse_Rate', 'Total_Cost']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        # 遍历每个减少比例
        for reduction in reduction_percentages:
            logging.info(f"开始处理{'免费时长' if adjust_method == 'reduce_free_minutes' else '调度上限' if adjust_method == 'reduce_scheduling_upper_limit' else '正常'}减少 {reduction*100:.0f}% 的情况。")

            # 深拷贝需求数据,以避免在迭代中相互影响
            bandwidth_demands_dict_copy = copy.deepcopy(bandwidth_demands_dict)

            adjust_limits = True
            if adjust_method == 'reduce_free_minutes':
                # 根据减少比例调整节点的免费时长
                pass  # 在自动节点调整的情况下,这部分可以省略或根据需要修改
            elif adjust_method == 'reduce_scheduling_upper_limit':
                # 根据减少比例调整节点的调度上限
                pass  # 同上
            else:
                # 不进行免费时长调减和调度上限调整
                adjust_limits = False

            # 并发处理所有运营商和省份,传递 reduction 参数和 adjust_limits 参数
            results = process_all(
                operator_province_pairs,
                bandwidth_demands_dict_copy,
                province_limits,
                output_dir=output_dir,
                reduction_percentage=reduction,  # 新增参数
                adjust_limits=adjust_limits,
                enable_automatic_plan_line_adjustment=not disable_auto_plan_line  # 根据命令行参数设置
            )

            # 计算总的免费时长和平均复用率
            total_free_minutes = 0
            total_reuse_rate = 0
            count = 0
            total_upper_limit_bandwidth = 0
            total_guaranteed_bandwidth = 0
            total_node_95 = 0
            total_demand_95 = 0  # 总的需求95值,应该是固定的
            total_cost = 0.0  # 总成本

            for result in results:
                if result is None:
                    continue
                (operator, province, sum_upper_limit_bandwidth, sum_guaranteed_bandwidth,
                 node_95_value, demand_95_value, reuse_rate, total_cost_value) = result
                # 统计所有节点的免费时长
                # 在自动节点调整的情况下,可以根据需要统计节点的免费时长
                # total_free_minutes += ...

                if reuse_rate is not None:
                    total_reuse_rate += reuse_rate
                    count += 1
                    total_upper_limit_bandwidth += sum_upper_limit_bandwidth
                    total_guaranteed_bandwidth += sum_guaranteed_bandwidth
                    total_node_95 += node_95_value
                    total_demand_95 = demand_95_value  # 记录需求95值
                    total_cost += total_cost_value  # 累加总成本
                else:
                    total_upper_limit_bandwidth += sum_upper_limit_bandwidth
                    total_guaranteed_bandwidth += sum_guaranteed_bandwidth
                    total_cost += total_cost_value  # 即使复用率无法计算,也要累加成本

            if count > 0:
                average_reuse_rate = total_reuse_rate / count
            else:
                average_reuse_rate = None

            # 记录结果
            total_free_minutes_list.append(total_free_minutes)
            reuse_rate_list.append(average_reuse_rate)

            writer.writerow({
                'Reduction_Percentage': f"{reduction*100:.0f}%",
                'Total_Free_Minutes': f"{total_free_minutes:.2f}",
                'Sum_Upper_Limit_Bandwidth': f"{total_upper_limit_bandwidth:.2f}",
                'Sum_Guaranteed_Bandwidth': f"{total_guaranteed_bandwidth:.2f}",
                'Total_Bandwidth_95th_Percentile': f"{total_demand_95:.2f}",
                'Total_Node_95th_Percentile': f"{total_node_95:.2f}",  # 新增字段
                'Optimal_Reuse_Rate': f"{average_reuse_rate:.4f}" if average_reuse_rate is not None else "N/A",
                'Total_Cost': f"{total_cost:.2f}"  # 新增字段
            })

            if average_reuse_rate is not None:
                logging.info(f"{'免费时长' if adjust_method == 'reduce_free_minutes' else '调度上限' if adjust_method == 'reduce_scheduling_upper_limit' else '正常'}减少 {reduction*100:.0f}% 时,平均复用率为 {average_reuse_rate:.4f},总付费成本为 {total_cost:.2f}")
            else:
                logging.info(f"{'免费时长' if adjust_method == 'reduce_free_minutes' else '调度上限' if adjust_method == 'reduce_scheduling_upper_limit' else '正常'}减少 {reduction*100:.0f}% 时,平均复用率无法计算,总付费成本为 {total_cost:.2f}")

    logging.info(f"免费时长与复用率的关系已保存到 {csv_filename}")

    logging.info("带宽分配系统结束运行。")


if __name__ == "__main__":
    main()