import json
import os
import re
import logging
import sys
from pathlib import Path
from shutil import copy2
from datetime import datetime
from utils import resource_path
# -------------------------------
# Logging configuration: one timestamped log file per run, kept under
# <project root>/output/log (directory created on import if missing).
# -------------------------------
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
LOG_DIR = PROJECT_ROOT / "output" / "log"
LOG_DIR.mkdir(parents=True, exist_ok=True)
LOG_FILE = LOG_DIR / f"range_sync_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
class CLMRangeSynchronizer:
    """Synchronizes RANGE_* channel-range macros into a CLM C source file.

    Reads the list of required ranges from config.json, parses the
    channel_ranges_xxx[] arrays and range_xxx enums between two marker
    comments in the C file, and appends any ranges that are missing.
    """

    def __init__(self, c_file_path=None, dry_run=False, config_path="config/config.json"):
        """Load configuration and resolve the target C file path.

        Args:
            c_file_path: explicit C file path; ``None`` selects the built-in
                file named by ``config["target_c_file"]``.
            dry_run: when True, changes are reported but never written.
            config_path: config.json location (resolved via resource_path).

        Raises:
            FileNotFoundError: config or C file missing.
            ValueError: config is not valid JSON.
            RuntimeError: any other error while reading the config.
            KeyError: a required config field is absent.
        """
        self.logger = logging.getLogger(__name__)
        # Resolve every path through resource_path (PyInstaller-aware).
        self.config_file_path = resource_path(config_path)
        # Fix: use the instance logger consistently (was logging.info on
        # the root logger, inconsistent with the rest of the class).
        self.logger.info(f"配置文件: {self.config_file_path}")
        if not os.path.exists(self.config_file_path):
            raise FileNotFoundError(f"配置文件不存在: {self.config_file_path}")
        try:
            with open(self.config_file_path, 'r', encoding='utf-8') as f:
                self.config = json.load(f)
            print(f"配置文件已加载: {self.config_file_path}")
        except json.JSONDecodeError as e:
            raise ValueError(f"配置文件格式错误,JSON 解析失败: {self.config_file_path}") from e
        except Exception as e:
            raise RuntimeError(f"读取配置文件时发生未知错误: {e}") from e
        self.dry_run = dry_run
        if c_file_path is None:
            # Use the built-in default C file (the one bundled into the exe).
            if "target_c_file" not in self.config:
                raise KeyError(" config 文件缺少 'target_c_file' 字段")
            internal_c_path = self.config["target_c_file"]
            self.logger.info(f"使用内置 C 文件: {internal_c_path}")
            self.c_file_path = Path(resource_path(internal_c_path))
            self._is_internal_c_file = True
        else:
            # Caller supplied an explicit path.
            self.c_file_path = Path(c_file_path)
            self._is_internal_c_file = False
        if not self.c_file_path.exists():
            raise FileNotFoundError(f"找不到 C 源文件: {self.c_file_path}")
        # RANGE_* macros requested by config (filled by load_config()).
        self.used_ranges = []
        # array_name -> [RANGE_xxx, ...], e.g.
        #   {"channel_ranges_2g_20m": ["RANGE_2G_20M_1_11", "RANGE_2G_20M_6_6"]}
        self.array_macros = {}
        # array_name -> [{"low": int, "high": int}, ...], e.g.
        #   {"channel_ranges_2g_20m": [{"low": 1, "high": 11}]}
        self.struct_entries = {}
        # RANGE_xxx -> index within its array (parsed from the enum).
        self.enum_to_index = {}
        if "STR_CHANNEL_RANGE" not in self.config:
            raise KeyError(" config 文件缺少 'STR_CHANNEL_RANGE' 字段")
        self.start_marker = self.config["STR_CHANNEL_RANGE"]
        if "END_CHANNEL_RANGE" not in self.config:
            raise KeyError(" config 文件缺少 'END_CHANNEL_RANGE' 字段")
        self.end_marker = self.config["END_CHANNEL_RANGE"]
def offset_to_lineno(self, content: str, offset: int) -> int:
"""将字符偏移量转换为行号(从1开始)"""
return content.count('\n', 0, offset) + 1
def load_config(self):
"""加载并解析 config.json"""
with open(self.config_file_path, 'r', encoding='utf-8') as f:
data = json.load(f)
if "used_ranges" not in data:
raise KeyError(" config 文件缺少 'used_ranges' 字段")
valid_ranges = []
for item in data["used_ranges"]:
if isinstance(item, str) and re.match(r'^RANGE_[\w\d_]+_\d+_\d+$', item):
valid_ranges.append(item)
else:
self.logger.warning(f"跳过无效项: {item}")
self.used_ranges = sorted(set(valid_ranges))
self.logger.info(f"已从 {self.config_file_path} 加载 {len(self.used_ranges)} 个有效 RANGE 宏")
def parse_c_arrays(self):
"""解析 C 文件中的 channel_ranges_xxx[] 数组 和 enum range_xxx"""
content = self.c_file_path.read_text(encoding='utf-8')
start_idx = content.find(self.start_marker)
end_idx = content.find(self.end_marker)
if start_idx == -1 or end_idx == -1:
raise ValueError("未找到 CHANNEL RANGES 注释锚点")
block = content[start_idx:end_idx]
start_line = self.offset_to_lineno(content, start_idx)
end_line = self.offset_to_lineno(content, end_idx)
self.logger.info(
f"找到标记范围:{self.start_marker} → 第 {start_line} 行, {self.end_marker} → 第 {end_line} 行")
# === 1. 解析数组:static const struct clm_channel_range xxx[] = { ... };
array_pattern = re.compile(
r'static\s+const\s+struct\s+clm_channel_range\s+(channel_ranges_[\w\d]+)\s*\[\s*\]\s*=\s*\{(.*?)\}\s*;',
re.DOTALL | re.MULTILINE
)
for array_name, body in array_pattern.findall(block):
entries = []
for low, high in re.findall(r'\{\s*(\d+)\s*,\s*(\d+)\s*\}', body):
entries.append({"low": int(low), "high": int(high)})
self.struct_entries[array_name] = entries
self.array_macros[array_name] = [] # 先留空,后续从 enum 填充
self.logger.info(f" 解析数组 {array_name}: {len(entries)} 个范围项")
# === 2. 解析枚举:enum range_xxx { RANGE_A = 0, ... }
enum_pattern = re.compile(r'enum\s+range_([a-z\d_]+)\s*\{([^}]*)\}', re.DOTALL | re.IGNORECASE)
if enum_pattern.search(block):
self.logger.info("block 中存在匹配的 enum range")
else:
self.logger.info("未找到匹配的 enum range")
#self.logger.info(f"block 片段预览:\n{block[:2500]}")
for match in enum_pattern.finditer(block):
suffix = match.group(1) # 如 '2g_40m'
enum_body = match.group(2)
array_name = f"channel_ranges_{suffix}"
#self.logger.info(f" 解析 {enum_body} {suffix}")
if array_name not in self.struct_entries:
self.logger.warning(f" 找到 enum {match.group(0)[:30]}... 但无对应数组")
continue
# 提取 RANGE_xxx = N
for macro, idx_str in re.findall(r'(RANGE_[\w\d_]+)\s*=\s*(\d+)', enum_body):
idx = int(idx_str)
if idx >= len(self.struct_entries[array_name]):
self.logger.warning(f" 索引越界: {macro} = {idx} > 数组长度 {len(self.struct_entries[array_name])}")
continue
self.enum_to_index[macro] = idx
if macro not in self.array_macros[array_name]:
self.array_macros[array_name].append(macro)
#self.logger.info(f" 关联 {macro} → {array_name}[{idx}]")
self.logger.info(f" 总共建立 {len(self.enum_to_index)} 个宏与数组项的映射关系")
def get_array_name_for_range(self, range_macro):
"""根据 RANGE 宏推断应属于哪个数组"""
match = re.match(r'RANGE_([0-9]+[A-Za-z])_([0-9]+)M_', range_macro, re.IGNORECASE)
if not match:
self.logger.warning(f"无法推断数组名: {range_macro}")
return None
band = match.group(1).lower() # '2g'
bw = match.group(2) # '20'
return f"channel_ranges_{band}_{bw}m"
def extract_channels_from_macro(self, macro):
"""
从宏字符串中提取信道范围。
Args:
macro (str): 格式如 RANGE_2G_20M_1_11
Returns:
tuple: (low, high) 或 (None, None)
"""
match = re.search(r'_(\d+)_(\d+)$', macro)
if match:
low = int(match.group(1))
high = int(match.group(2))
return low, high
self.logger.warning(f"宏格式错误,无法提取信道: {macro}")
return None, None
def validate_and_repair(self):
"""确保每个 used_range 都在正确的数组中"""
modified = False
changes = []
for range_macro in self.used_ranges:
array_name = self.get_array_name_for_range(range_macro)
if not array_name:
self.logger.warning(f"无法识别数组类型,跳过: {range_macro}")
continue
# --- 检查宏是否已在 enum 映射中 ---
existing_idx = None
for macro, idx in self.enum_to_index.items():
if macro == range_macro and self.get_array_name_for_range(macro) == array_name:
existing_idx = idx
break
if existing_idx is None:
# 新宏,需分配新索引
next_idx = len(self.struct_entries[array_name])
low, high = self.extract_channels_from_macro(range_macro)
if low is not None and high is not None:
self.struct_entries[array_name].append({"low": low, "high": high})
#self.logger.info(f"数组{array_name}: {self.struct_entries[array_name]}")
self.array_macros[array_name].append(range_macro)
#self.logger.info(f"枚举: {array_name}: {self.array_macros[array_name]}")
self.enum_to_index[range_macro] = next_idx
changes.append(f"扩展枚举: {range_macro} → {{{low}, {high}}} (index={next_idx})")
#self.logger.info(f"扩展枚举: {range_macro} → {{{low}, {high}}} (index={next_idx})")
modified = True
else:
self.logger.warning(f"无法解析信道范围: {range_macro}")
if modified and not self.dry_run:
self._write_back_in_block()
self.logger.info("C 文件已更新")
elif modified and self.dry_run:
self.logger.info("DRY-RUN MODE: 有变更但不会写入文件")
else:
self.logger.info(" 所有 RANGE 已存在,无需修改")
if modified:
self.logger.info(f" 共需添加 {len(changes)} 项:\n" + "\n".join(f" → {ch}" for ch in changes))
return modified
def _infer_array_from_enum(self, enum_decl):
"""从 enum 声明推断对应的数组名"""
match = re.search(r'enum\s+range_([a-z\d_]+)', enum_decl)
if match:
return f"channel_ranges_{match.group(1)}"
return None
def _format_array_body(self, structs, indent=" "):
"""格式化结构体数组内容,每行最多4个,数字右对齐"""
items = [f"{{ {s['low']:>2d}, {s['high']:>2d} }}" for s in structs]
lines = []
for i in range(0, len(items), 4):
group = items[i:i + 4]
lines.append(indent + ", ".join(group))
return "\n".join(lines)
def _write_back_in_block(self):
"""安全地一次性更新 C 文件中的数组和枚举定义"""
if self.dry_run:
self.logger.info("DRY-RUN: 跳过写入文件")
return
try:
content = self.c_file_path.read_text(encoding='utf-8')
start_idx = content.find(self.start_marker)
end_idx = content.find(self.end_marker) + len(self.end_marker)
if start_idx == -1 or end_idx == -1:
raise ValueError("未找到 CHANNEL RANGES 标记块")
header = content[:start_idx]
footer = content[end_idx:]
block = content[start_idx:end_idx]
replacements = [] # (start, end, replacement)
# === 工具函数:移除注释避免误匹配 ===
def remove_comments(text):
text = re.sub(r'//.*$', '', text, flags=re.MULTILINE)
text = re.sub(r'/\*.*?\*/', '', text, flags=re.DOTALL)
return text
# === 1. 更新 channel_ranges_xxx[] 数组:只在末尾添加新项 ===
array_pattern = re.compile(
r'(\b(channel_ranges_[\w\d]+)\s*\[\s*\]\s*=\s*\{)(.*?)(\}\s*;\s*)',
re.DOTALL | re.MULTILINE
)
matches = list(array_pattern.finditer(block))
self.logger.info(f" 找到 {len(matches)} 个 channel_ranges 数组")
for i, match in enumerate(matches):
self.logger.info(f" 匹配 {i + 1}: 数组名={match.group(2)}, 起始位置={match.start()}")
for match in array_pattern.finditer(block):
array_name = match.group(2)
if array_name not in self.struct_entries:
self.logger.warning(f" 未找到 {array_name} 数组")
continue
structs = self.struct_entries[array_name]
body_content = match.group(3) # 直接就是 {} 中间的原始内容(含空格换行)
original_end = match.end() # 不变,仍是整个声明结束位置
self.logger.info(f" 数组 {array_name} 内容: {body_content}")
# 提取第一行缩进(用于新行)
first_line = body_content.split('\n')[0] if body_content.strip() else ""
indent_match = re.match(r'^(\s*)', first_line)
indent = indent_match.group(1) if indent_match else " "
# === 智能插入 {low, high}, 条目(支持同行追加 & 对齐)===
existing_items = []
item_pattern = r'\{\s*(\d+)\s*,\s*(\d+)\s*\}'
for m in re.finditer(item_pattern, body_content):
low, high = int(m.group(1)), int(m.group(2))
existing_items.append((low, high))
all_matches = list(re.finditer(item_pattern, body_content))
if not all_matches:
insert_pos_in_body = len(body_content)
else:
last_match = all_matches[-1]
close_brace_pos = body_content.find('}', last_match.start())
insert_pos_in_body = close_brace_pos + 2 if close_brace_pos != -1 else last_match.end()
inserted_count = len(existing_items)
if inserted_count >= len(structs):
continue
new_item = structs[inserted_count]
low, high = new_item['low'], new_item['high']
if (low, high) in existing_items:
self.logger.warning(f"已存在 {low}, {high} 项,跳过")
continue
# 分析最后一行用于格式继承
lines = [line for line in body_content.split('\n') if line.strip()]
last_line = lines[-1] if lines else ""
# 推断缩进和对齐位置
indent_match = re.match(r'^(\s*)', last_line) if last_line else None
line_indent = (indent_match.group(1) if indent_match else "") if last_line else " "
expanded_last = last_line.expandtabs(4) if last_line else ""
first_struct_match = re.search(r'\{', remove_comments(last_line)) if last_line else None
if first_struct_match:
raw_before = last_line[:first_struct_match.start()]
expanded_before = raw_before.expandtabs(4)
target_struct_start_col = len(expanded_before)
else:
target_struct_start_col = len(line_indent.replace('\t', ' '))
# 计算当前行已有多少个结构体
clean_last = remove_comments(last_line) if last_line else ""
visible_structs = len(re.findall(r'\{\s*\d+\s*,\s*\d+\s*\}', clean_last))
MAX_PER_LINE = 8
formatted_item = f"{{ {low:>2d}, {high:>2d} }}"
# 决定插入方式
if visible_structs < MAX_PER_LINE and last_line.strip():
# 同行追加
insertion = f" {formatted_item},"
else:
# 换行对齐
raw_indent_len = len(line_indent.replace('\t', ' '))
leading_spaces = max(0, target_struct_start_col - raw_indent_len)
padding = ' ' * leading_spaces
insertion = f"\n{line_indent}{padding}{formatted_item},"
# 记录插入点
insert_offset_in_block = match.start(3) + insert_pos_in_body
replacements.append((insert_offset_in_block, insert_offset_in_block, insertion))
self.logger.info(f"插入: {insertion.strip()} → 偏移 {insert_offset_in_block}")
range_macro = f"RANGE_{array_name.upper().replace('CHANNEL_RANGES_', '').replace('_', '_')}_{low}_{high}"
self.logger.info(f"扩展数组: {range_macro} → {{{low}, {high}}} (index={inserted_count})")
# === 2. 更新 enum range_xxx:精确继承上一行宏名左对齐与 '=' 对齐 ===
enum_pattern = re.compile(r'(enum\s+range_[\w\d_]+\s*\{)([^}]*)\}\s*;', re.DOTALL)
for match in enum_pattern.finditer(block):
enum_name_match = re.search(r'range_([a-zA-Z0-9_]+)', match.group(0))
if not enum_name_match:
continue
inferred_array = f"channel_ranges_{enum_name_match.group(1)}"
if inferred_array not in self.array_macros:
continue
macro_list = self.array_macros[inferred_array]
enum_body = match.group(2)
# 解析已有宏及其值
existing_macros = dict(re.findall(r'(RANGE_[\w\d_]+)\s*=\s*(\d+)', remove_comments(enum_body)))
next_id = len(existing_macros)
if next_id >= len(macro_list):
continue
new_macro = macro_list[next_id]
# 获取非空行
lines = [line for line in enum_body.split('\n') if line.strip()]
last_line = lines[-1] if lines else ""
if not last_line.strip():
# fallback 缩进
line_indent = " "
target_macro_start_col = 4
target_eq_col = 32
else:
indent_match = re.match(r'^(\s*)', last_line)
line_indent = indent_match.group(1) if indent_match else " "
# 展开 tab(统一按 4 空格处理)
expanded_last = last_line.expandtabs(4)
# 提取第一个 RANGE_xxx 宏名
first_macro_match = re.search(r'RANGE_[\w\d_]+', remove_comments(last_line))
if not first_macro_match:
target_macro_start_col = len(line_indent)
target_eq_col = 32
else:
macro_text = first_macro_match.group(0)
macro_start = first_macro_match.start()
# 计算视觉起始列(基于展开后的字符串)
raw_before = last_line[:macro_start]
expanded_before = raw_before.expandtabs(4)
target_macro_start_col = len(expanded_before)
# 找第一个 "=" 的视觉列
eq_match = re.search(r'=\s*\d+', last_line[macro_start:])
if eq_match:
eq_abs_start = macro_start + eq_match.start()
raw_eq_part = last_line[:eq_abs_start]
expanded_eq_part = raw_eq_part.expandtabs(4)
target_eq_col = len(expanded_eq_part)
else:
# fallback
target_eq_col = target_macro_start_col + len(macro_text) + 8
# 现在我们知道:
# - 宏名应该从第 target_macro_start_col 列开始(视觉)
# - `=` 应该出现在 target_eq_col 列
# 计算当前宏名需要多少前置空格才能对齐
current_visual_len = len(new_macro.replace('\t', ' '))
padding_to_eq = max(1, target_eq_col - target_macro_start_col - current_visual_len)
full_padding = ' ' * padding_to_eq
formatted_new = f"{new_macro}{full_padding}= {next_id}"
# 判断是否同行追加(最多 4 个)
clean_last = remove_comments(last_line)
visible_macros = len(re.findall(r'RANGE_[\w\d_]+', clean_last))
if visible_macros < 4 and last_line.strip():
# 同行追加:前面加两个空格分隔
separator = " "
updated_content = last_line + separator + formatted_new + ","
new_body = enum_body.rsplit(last_line, 1)[0] + updated_content
else:
# 换行:使用原始 indent 开头,然后补足到 target_macro_start_col
raw_indent_len = len(line_indent.replace('\t', ' '))
leading_spaces_needed = max(0, target_macro_start_col - raw_indent_len)
prefix_padding = ' ' * leading_spaces_needed
new_line = f"{line_indent}{prefix_padding}{formatted_new},"
trailing = enum_body.rstrip()
maybe_comma = "," if not trailing.endswith(',') else ""
new_body = f"{trailing}{maybe_comma}\n{new_line}"
# 重建 enum
new_enum = f"{match.group(1)}{new_body}\n}};"
replacements.append((match.start(), match.end(), new_enum))
self.logger.info(f"扩展枚举: {new_macro} = {next_id}")
# === 应用替换:倒序防止 offset 错乱 ===
replacements.sort(key=lambda x: x[0], reverse=True)
result_block = block
for start, end, r in replacements:
result_block = result_block[:start] + r + result_block[end:]
# 写回前备份
backup_path = self.c_file_path.with_suffix('.c.bak')
copy2(self.c_file_path, backup_path)
self.logger.info(f"已备份 → {backup_path}")
# 写入新内容
self.c_file_path.write_text(header + result_block + footer, encoding='utf-8')
self.logger.info(f" 成功保存修改: {self.c_file_path}")
except Exception as e:
self.logger.error(f"写回文件失败: {e}", exc_info=True)
raise
def run(self):
self.logger.info("开始同步 CLM RANGE 定义...")
try:
self.load_config()
self.parse_c_arrays()
was_modified = self.validate_and_repair()
if was_modified:
if self.dry_run:
self.logger.info(" 预览模式:检测到变更,但不会写入文件")
else:
self.logger.info(" 同步完成:已成功更新 C 文件")
else:
self.logger.info(" 所有 RANGE 已存在,无需修改")
return was_modified
except Exception as e:
self.logger.error(f" 同步失败: {e}", exc_info=True)
raise
def main():
    """Script entry point: configure logging, then run the synchronizer."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
        handlers=[
            logging.FileHandler(LOG_FILE, encoding='utf-8'),
            logging.StreamHandler(sys.stdout),
        ],
        force=True,
    )
    logger = logging.getLogger(__name__)
    # Fixed run configuration.
    c_file_path = "input/wlc_clm_data_6726b0.c"
    dry_run = False
    config_path = "config/config.json"
    logging.getLogger().setLevel("INFO")
    print(" 开始同步 RANGE 定义...")
    print(f" C 源文件: {c_file_path}")
    if dry_run:
        print(" 启用 dry-run 模式:仅预览变更,不修改文件")
    try:
        sync = CLMRangeSynchronizer(
            c_file_path=None,
            dry_run=dry_run,
            config_path=config_path,
        )
        sync.run()
        print(" 同步完成!")
        print(f" 详细日志已保存至: {LOG_FILE}")
    except FileNotFoundError as e:
        logger.error(f"文件未找到: {e}")
        print(" 请检查文件路径是否正确。")
        sys.exit(1)
    except PermissionError as e:
        logger.error(f"权限错误: {e}")
        print(" 无法读取或写入文件,请检查权限。")
        sys.exit(1)
    except Exception as e:
        logger.error(f"程序异常退出: {e}", exc_info=True)
        sys.exit(1)


if __name__ == '__main__':
    main()
# power/power_sync.py — writer for the power-locale block, following the
# style of the channel-range writer above.
import os
import re
import shutil
from pathlib import Path
import logging
from typing import Dict, List
from datetime import datetime
import json
from utils import resource_path
# --- Global logging setup: one timestamped log file per run, kept under
# <project root>/output/log (directory created on import if missing). ---
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
LOG_DIR = PROJECT_ROOT / "output" / "log"
LOG_DIR.mkdir(parents=True, exist_ok=True)
LOG_FILE = LOG_DIR / f"power_sync_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
class PowerTableSynchronizer:
    """Synchronizes power-locale data (enums, power tables, country codes)
    into the CLM C source file, between configured anchor markers."""

    # ——————————————————————————
    # 1. Initialization and configuration loading
    # ——————————————————————————
    def __init__(self, config_path="config/config.json", dry_run=False):
        """Load config.json, locate the target C file, and cache anchors.

        Args:
            config_path: config.json location (resolved via resource_path).
            dry_run: when True, save_content() skips the actual write.

        Raises:
            FileNotFoundError, ValueError, RuntimeError, KeyError on
            missing/invalid configuration or a missing target C file.
        """
        self.dry_run = dry_run
        # Resolve paths relative to resource_path (PyInstaller _MEIPASS or
        # the current directory).
        # NOTE(review): this assumes resource_path(".") returns a Path
        # (resolve()/"/" are used on it) — confirm against utils.resource_path.
        base_dir = resource_path(".")
        self.project_root = base_dir.resolve()
        self.output_dir = self.project_root / "output"
        self.log_dir = self.output_dir / "log"
        self.log_dir.mkdir(parents=True, exist_ok=True)
        self.logger = logging.getLogger(__name__)
        # --- configuration file ---
        self.config_file_path = resource_path(config_path)
        self.logger.info(f"配置文件: {self.config_file_path}")
        if not os.path.exists(self.config_file_path):
            raise FileNotFoundError(f"配置文件不存在: {self.config_file_path}")
        try:
            with open(self.config_file_path, 'r', encoding='utf-8') as f:
                self.config = json.load(f)
            print(f"配置文件已加载: {self.config_file_path}")
        except json.JSONDecodeError as e:
            raise ValueError(f"配置文件格式错误,JSON 解析失败: {self.config_file_path}") from e
        except Exception as e:
            raise RuntimeError(f"读取配置文件时发生未知错误: {e}") from e
        # --- target C file ---
        if "target_c_file" not in self.config:
            raise KeyError(" config 文件缺少 'target_c_file' 字段")
        rel_c_path = self.config["target_c_file"]
        self.target_c_file = resource_path(rel_c_path)
        if not os.path.exists(self.target_c_file):
            raise FileNotFoundError(f"配置中指定的 C 源文件不存在: {self.target_c_file}")
        # Fix: use the instance logger (was logging.info on the root logger).
        self.logger.info(f" 已定位目标 C 文件: {self.target_c_file}")
        self.platform = self.config.get("platform_rules", {})
        # --- anchor markers delimiting the power-locale block ---
        if "STR_POWER_LOCALE" not in self.config:
            raise KeyError(" config 文件缺少 'STR_POWER_LOCALE' 字段")
        self.start_marker = self.config["STR_POWER_LOCALE"]
        if "END_POWER_LOCALE" not in self.config:
            raise KeyError(" config 文件缺少 'END_POWER_LOCALE' 字段")
        self.end_marker = self.config["END_POWER_LOCALE"]
# ——————————————————————————
# 2. 主接口(核心入口放最前面)
# ——————————————————————————
def sync_all(self,
locale_entries: Dict[str, str],
power_tables: Dict[str, str],
country_entries: List[str]):
"""
一站式同步所有 CLM 内容
"""
self.logger.info(f"开始执行 CLM 数据同步任务...")
self.logger.info(f"目标文件: {self.target_c_file.name}")
self.logger.info(f"预览模式: {'是' if self.dry_run else '否'}")
try:
content = self.load_content()
content = self.sync_locales_enum(content, locale_entries)
content = self.sync_power_tables(content, power_tables)
content = self.sync_country_definitions(content, country_entries)
self.save_content(content)
self.logger.info(" 全部同步任务完成!")
except Exception as e:
self.logger.info(f" 同步过程中发生错误: {str(e)}")
raise
# ——————————————————————————
# 3. 核心同步流程(按执行顺序排列)
# ——————————————————————————
def load_content(self) -> str:
"""读取目标 C 文件内容"""
try:
content = self.target_c_file.read_text(encoding='utf-8')
self.logger.info(f" 已读取文件: {self.target_c_file.name} ({len(content)} 字符)")
return content
except Exception as e:
self.logger.info(f" 读取文件失败: {e}")
raise
def sync_locales_enum(self, content: str, new_entries: Dict[str, str]) -> str:
"""同步 enum locale_xxx_idx 区域"""
self.logger.info(" 开始同步 LOCALE ENUMS...")
try:
block, start, end = self._extract_section(content, "enums")
except ValueError:
return content
lines = [line.rstrip() for line in block.splitlines()]
output_lines = []
current_enum = None
modified = False
i = 0
while i < len(lines):
line = lines[i]
stripped = line.strip()
enum_match = re.match(r"enum\s+([a-zA-Z0-9_]+)", stripped)
if enum_match:
enum_name = enum_match.group(1)
pattern = self.platform.get("locale_enum", {}).get("pattern", r"locale_(\w+)_idx")
if re.fullmatch(pattern.replace("\\", ""), enum_name):
current_enum = enum_name
output_lines.append(line)
i += 1
continue
if stripped.endswith("};") and current_enum:
key = current_enum.lower()
if key in new_entries:
entry_code = new_entries[key]
last_line = output_lines[-1].rstrip()
if not (last_line.endswith(",") or last_line.endswith("{")):
output_lines[-1] = last_line + ","
output_lines.append(f" {entry_code},")
self.logger.info(f" 新增枚举: {key} → {entry_code}")
modified = True
output_lines.append(line)
current_enum = None
i += 1
continue
output_lines.append(line)
i += 1
if not modified:
self.logger.info(" 枚举区无变更")
return content
new_block = "\n".join(output_lines)
updated = self._replace_section(content, "enums", new_block)
self.logger.info(" 枚举区更新完成")
return updated
def sync_power_tables(self, content: str, tables: Dict[str, str]) -> str:
"""同步功率表数组"""
self.logger.info(" 开始同步 POWER TABLES...")
try:
block, start, end = self._extract_section(content, "power_tables")
self.logger.info(f" 已定位功率表区: {start} → {end}")
except ValueError:
return content
prefix = self.platform["power_table"]["prefix"]
suffix = self.platform["power_table"]["suffix"].replace("[", "\\[").replace("]", "\\]")
array_pattern = r'static\s+const\s+unsigned\s+char\s+(' + prefix + r'[a-zA-Z0-9_]+)' + suffix + r'\s*=\s*\{[^}]*\}\s*;'
existing_names = set(re.findall(array_pattern, block, re.IGNORECASE))
all_arrays = []
for match in re.finditer(array_pattern, block, re.DOTALL | re.IGNORECASE):
all_arrays.append(match.group(0))
added = []
for key, declaration in tables.items():
if key not in existing_names:
all_arrays.append(declaration.strip() + ";")
added.append(key)
self.logger.info(f" 新增功率表: {prefix}{key}{suffix}")
if not added:
self.logger.info(" 功率表区无新增")
return content
new_block = "\n\n".join(all_arrays)
updated = self._replace_section(content, "power_tables", new_block)
self.logger.info(f" 功率表更新完成,新增: {', '.join(added)}")
return updated
def sync_country_definitions(self, content: str, country_entries: List[str]) -> str:
"""同步国家码定义 REGION(...)"""
self.logger.info(" 开始同步 COUNTRY DEFINITIONS...")
try:
block, start, end = self._extract_section(content, "country_definitions")
except ValueError:
return content
pattern = self.platform["country_definition"]["pattern"]
existing_lines = [line.strip() for line in block.split('\n') if line.strip()]
country_map = {}
for line in existing_lines:
cc_match = re.search(pattern, line)
if cc_match:
country_map[cc_match.group(1)] = line
new_entries = []
for entry in country_entries:
cc_match = re.search(pattern, entry)
if cc_match:
cc = cc_match.group(1)
if cc not in country_map:
new_entries.append(entry.strip())
self.logger.info(f" 新增国家码: {cc}")
if not new_entries:
self.logger.info(" 国家码无新增")
return content
all_lines = existing_lines + new_entries
new_block = "\n".join(all_lines)
updated = self._replace_section(content, "country_definitions", new_block)
self.logger.info(f" 国家码更新完成,新增 {len(new_entries)} 个")
return updated
# ——————————————————————————
# 4. 内部文本操作辅助方法
# ——————————————————————————
def _extract_section(self, content: str, section: str) -> tuple[str, int, int]:
"""提取指定区域内容"""
try:
anchors = self.platform["anchors"][section]
start_marker = anchors["begin"]
end_marker = anchors["end"]
except KeyError as e:
raise ValueError(f"配置错误:缺少锚点定义 {section}") from e
start_pos = content.find(start_marker)
end_pos = content.find(end_marker)
if start_pos == -1 or end_pos == -1:
missing = "起始标记" if start_pos == -1 else "结束标记"
msg = f"未找到区域 {section} 的 {missing}: {start_marker[:30]}..."
logging.warning(msg)
raise ValueError(msg)
inner_start = start_pos + len(start_marker)
inner_end = end_pos
extracted = content[inner_start:inner_end].strip()
self.logger.info(f" 提取区域 [{section}]: {len(extracted)} 字符")
return extracted, inner_start, inner_end
def _replace_section(self, content: str, section: str, new_content: str) -> str:
"""替换指定区域内容"""
try:
anchors = self.platform["anchors"][section]
start_marker = anchors["begin"]
end_marker = anchors["end"]
except KeyError as e:
raise ValueError(f"配置错误:无法替换区域 {section}") from e
start_pos = content.find(start_marker)
end_pos = content.find(end_marker)
if start_pos == -1 or end_pos == -1:
raise ValueError(f"无法替换区域 {section},缺失锚点")
return content[:start_pos + len(start_marker)] + "\n" + new_content + "\n" + content[end_pos:]
def save_content(self, content: str):
"""保存内容并创建备份"""
if self.dry_run:
self.logger.info(" 【预览模式】跳过实际写入")
return
try:
backup_path = self.target_c_file.with_suffix(self.target_c_file.suffix + ".bak")
shutil.copy2(self.target_c_file, backup_path)
self.logger.info(f" 已创建备份: {backup_path.name}")
with open(self.target_c_file, 'w', encoding='utf-8') as f:
f.write(content)
self.logger.info(f" 成功更新文件: {self.target_c_file.name}")
except Exception as e:
self.logger.info(f"写入文件失败: {e}")
raise
# ——————————————————————————
# 5. 工具函数(静态方法,独立于实例)
# ——————————————————————————
@staticmethod
def _extract_locale_block(content: str, locale_name: str) -> str | None:
"""
从 tx_limit_table.c 中提取 /* Locale NAME */ 对应的数据块
返回去注释后的干净数据字符串
"""
pattern = rf'/\*\s*Locale\s+{locale_name}\s*\*/\s*([^/]*)'
match = re.search(pattern, content, re.DOTALL)
if not match:
return None
block = match.group(1).strip()
block = re.sub(r'/\*.*?\*/', '', block) # 去除注释
block = re.sub(r'\s+', ' ', block).strip() # 归一化空白
return block
# ——————————————————————————
# 主函数:注入 DEFAULT 功率表
# ——————————————————————————
def main():
    """Inject the DEFAULT power table into the target C file.

    Paths are derived from the project root, independent of the current
    working directory. Exits with status 1 on any failure.
    """
    import sys
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
        handlers=[
            logging.FileHandler(LOG_FILE, encoding='utf-8'),
            logging.StreamHandler(sys.stdout)
        ],
        force=True
    )
    logging.getLogger().setLevel("INFO")
    logger = logging.getLogger(__name__)
    project_root = Path(__file__).parent.parent
    CONFIG_FILE = project_root / "config" / "config.json"
    GENERATED_FILE = project_root / "output" / "tx_limit_table.c"
    TARGET_C_FILE = project_root / "input" / "wlc_clm_data_6726b0.c"
    # Verify required files exist before doing any work.
    for path, desc in [(CONFIG_FILE, "配置文件"), (GENERATED_FILE, "生成数据文件"), (TARGET_C_FILE, "目标C文件")]:
        if not path.exists():
            # Fix: logging.Logger.error() has no `file=` parameter — the old
            # `file=sys.stderr` kwarg raised TypeError instead of logging.
            logger.error(f" {desc}不存在: {path}")
            sys.exit(1)
    try:
        gen_content = GENERATED_FILE.read_text(encoding='utf-8')
        base_block = PowerTableSynchronizer._extract_locale_block(gen_content, "DEFAULT")
        ht_block = PowerTableSynchronizer._extract_locale_block(gen_content, "DEFAULT_HT")
        if not base_block and not ht_block:
            raise ValueError("未提取到任何 DEFAULT 功率表数据")
        locale_entries = {
            "locale_2g_idx": "LOCALE_2G_IDX_DEFAULT"
        }
        power_tables = {}
        if base_block:
            power_tables["2g_base_DEFAULT"] = (
                f"static const unsigned char locales_2g_base_DEFAULT[] = {{\n {base_block}\n}}"
            )
        if ht_block:
            power_tables["2g_ht_DEFAULT"] = (
                f"static const unsigned char locales_2g_ht_DEFAULT[] = {{\n {ht_block}\n}}"
            )
        country_entries = [
            'REGION("CN", LOCALE_2G_IDX_DEFAULT)',
            'REGION("US", LOCALE_2G_IDX_DEFAULT)',
        ]
        synchronizer = PowerTableSynchronizer(
            config_path=str(CONFIG_FILE),
            dry_run=False
        )
        synchronizer.sync_all(
            locale_entries=locale_entries,
            power_tables=power_tables,
            country_entries=country_entries
        )
        logger.info(" 成功注入 DEFAULT 功率表!")
        logger.info(f" 详细日志已保存至: {LOG_FILE}")
    except Exception as e:
        logger.error(f" 错误: {e}")
        sys.exit(1)


# ——————————————————————————
# Entry point
# ——————————————————————————
if __name__ == "__main__":
    main()
# end of power_sync.py