The FederatedScope .yaml Configuration File

The .yaml file configures the parameters a run needs, such as federated-learning settings, dataset settings, dataloader settings, optimizer settings, evaluation settings, and network-model settings. The example configuration file below sets only some of these options and is provided for learning purposes. For detailed usage, refer to the official FederatedScope documentation.
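The sketch below is a minimal example of how such a file is consumed, assuming FederatedScope's yacs-based configuration API (global_cfg); the file name femnist_fedavg.yaml is a placeholder for the config listed below. In practice the file is usually passed on the command line instead, e.g. python federatedscope/main.py --cfg femnist_fedavg.yaml.

# A minimal sketch, assuming FederatedScope's yacs-based config API;
# "femnist_fedavg.yaml" is a placeholder name for the file shown below.
from federatedscope.core.configs.config import global_cfg

cfg = global_cfg.clone()                    # start from the built-in defaults
cfg.merge_from_file('femnist_fedavg.yaml')  # override defaults with the .yaml
print(cfg.federate.method, cfg.model.type)  # e.g. FedAvg convnet2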

aggregator:
  BFT_args:  
  byzantine_node_num: 0
  inside_weight: 1.0
  num_agg_groups: 1
  num_agg_topk: []
  outside_weight: 0.0
  robust_rule: fedavg
asyn:
  use: False
attack:
  alpha_TV: 0.001
  alpha_prop_loss: 0
  attack_method: 
  attacker_id: -1
  classifier_PIA: randomforest
  edge_num: 100
  edge_path: edge_data/
  freq: 10
  info_diff_type: l2
  inject_round: 0
  insert_round: 100000
  label_type: dirty
  max_ite: 400
  mean: [0.9637]
  mia_is_simulate_in: False
  mia_simulate_in_round: 20
  pgd_eps: 2
  pgd_lr: 0.1
  pgd_poisoning: False
  poison_ratio: 0.5
  reconstruct_lr: 0.01
  reconstruct_optim: Adam
  scale_para: 1.0
  scale_poisoning: False
  self_epoch: 6
  self_lr: 0.05
  self_opt: False
  setting: fix
  std: [0.1592]
  target_label_ind: -1
  trigger_path: trigger/
  trigger_type: edge
backend: torch
cfg_file: 
check_completeness: False
# Loss criterion configuration
criterion:
  type: CrossEntropyLoss
# Dataset-related configuration
data:
  args: []
  batch_size: 64
  cSBM_phi: [0.5, 0.5, 0.5]
  cache_dir: 
  consistent_label_distribution: True
  drop_last: False
  file_path: 
  hetero_data_name: []
  hetero_synth_batch_size: 32
  hetero_synth_feat_dim: 128
  hetero_synth_prim_weight: 0.5
  is_debug: False
  loader: 
  max_query_len: 128
  max_seq_len: 384
  max_tgt_len: 128
  num_contrast: 0
  num_of_client_for_data: []
  num_steps: 30
  num_workers: 0
  pre_transform: []
  quadratic:
    dim: 1
    max_curv: 12.5
    min_curv: 0.02
  root: data/   # directory where datasets are stored
  save_data: False  # whether to save the generated/partitioned data
  server_holds_all: False  # whether the server holds all the data (for global training/evaluation)
  shuffle: True  # whether to shuffle the data
  sizes: [10, 5]
  splits: [0.6, 0.2, 0.2]  # [train, val, test] split ratios
  splitter:       # splitter used to partition data across clients
  splitter_args: []
  subsample: 0.05
  target_transform: []
  test_pre_transform: []
  test_target_transform: []
  test_transform: []
  transform: [['ToTensor', {}], ['Normalize', {'mean': [0.9637], 'std': [0.1592]}]]  # preprocessing transforms; see the torchvision sketch after the config
  trunc_stride: 128
  type: femnist  # dataset name
  val_pre_transform: []
  val_target_transform: []
  val_transform: []
  walk_length: 2
# Dataloader-related configuration; see the DataLoader sketch after the config
dataloader:
  batch_size: 10
  drop_last: False
  num_steps: 30
  num_workers: 0
  pin_memory: False
  shuffle: True
  sizes: [10, 5]
  theta: -1
  type: base
  walk_length: 2
# Device (GPU index)
device: 0
# Whether to run in distributed mode
distribute:
  use: False
early_stop:
  delta: 0.0
  improve_indicator_mode: best
  patience: 5
# Evaluation-related configuration
eval:
  best_res_update_round_wise_key: val_loss
  count_flops: True
  freq: 10
  metrics: ['acc', 'correct']
  monitoring: []
  report: ['weighted_avg', 'avg', 'fairness', 'raw']
  split: ['test', 'val']
# Name used for the run's output logs
expname: FedAvg_convnet2_on_femnist_lr0.01_lstep1
expname_tag: 
feat_engr:
  num_bins: 5
  scenario: hfl
  secure:
    dp:
    encrypt:
      type: dummy
    key_size: 3072
    type: encrypt
  selec_threshold: 0.05
  selec_woe_binning: quantile
  type: 
# Federated-learning-related configuration
federate:
  atc_load_from:   
  atc_vanilla: False  
  client_num: 100   # number of clients
  data_weighted_aggr: False
  ignore_weight: False
  join_in_info: []
  make_global_eval: False
  master_addr: 127.0.0.1
  master_port: 29500
  merge_test_data: False
  merge_val_data: False
  method: FedAvg   # federated learning algorithm
  mode: standalone  # running mode (standalone simulation)
  online_aggr: False
  process_num: 1
  resource_info_file: 
  restore_from: 
  sample_client_num: 20   # number of clients the server samples each round
  sample_client_rate: 0.2   # fraction of clients sampled each round (0.2 * 100 = 20)
  sampler: uniform
  save_to: 
  share_local_model: False
  total_round_num: 300   # total number of training rounds
  unseen_clients_rate: 0.0
  use_diff: False
  use_ss: False
fedopt:
  use: False
fedprox:
  use: False
fedsageplus:
  a: 1.0
  b: 1.0
  c: 1.0
  fedgen_epoch: 200
  gen_hidden: 128
  hide_portion: 0.5
  loc_epoch: 1
  num_pred: 5
fedswa:
  use: False
finetune:
  batch_or_epoch: epoch
  before_eval: False
  epoch_linear: 10
  freeze_param: 
  local_param: []
  local_update_steps: 1
  lr_linear: 0.005
  # optimizer used during fine-tuning
  optimizer:
    lr: 0.1   # learning rate
    type: SGD  # optimizer type
  scheduler:
    type: 
    warmup_ratio: 0.0
  simple_tuning: False
  weight_decay: 0.0
flitplus:
  factor_ema: 0.8
  lambdavat: 0.5
  tmpFed: 0.5
  weightReg: 1.0
gcflplus:
  EPS_1: 0.05
  EPS_2: 0.1
  seq_length: 5
  standardize: False
grad:
  grad_accum_count: 1
  grad_clip: 5.0
hpo:
  fedex:
    cutoff: 0.0
    diff: False
    eta0: -1.0
    flatten_ss: True
    gamma: 0.0
    pi_lr: 0.01
    psn: False
    sched: auto
    ss: 
    use: False
  fts:
    M: 100
    M_target: 200
    allow_load_existing_info: True
    diff: False
    fed_bo_max_iter: 50
    g_var: 1e-06
    gp_opt_schedule: 1
    local_bo_epochs: 50
    local_bo_max_iter: 50
    ls: 1.0
    obs_noise: 1e-06
    ss: 
    target_clients: []
    use: False
    v_kernel: 1.0
    var: 0.1
  init_cand_num: 16
  larger_better: False
  metric: client_summarized_weighted_avg.val_loss
  num_workers: 0
  pbt:
    max_stage: 5
    perf_threshold: 0.1
  pfedhpo:
    discrete: False
    ss: 
    target_fl_total_round: 1000
    train_anchor: False
    train_fl: False
    use: False
  scheduler: rs
  sha:
    budgets: []
    elim_rate: 3
    iter: 0
  ss: 
  table:
    eps: 0.1
    idx: 0
    num: 27
  trial_index: 0
  working_folder: hpo
# Model-related configuration
model:
  contrast_temp: 1.0
  contrast_topk: 100
  downstream_tasks: []
  dropout: 0.5   # dropout rate
  embed_size: 8   # embedding dimension
  gamma: 0
  graph_pooling: mean
  hidden: 2048  # output dimension of the hidden layer
  in_channels: 0  # input dimension
  input_shape: ()  # input shape
  label_smoothing: 0.1
  lambda_: 0.1
  layer: 2  # number of model layers
  length_penalty: 2.0
  max_answer_len: 30
  max_length: 200
  max_tree_depth: 3
  min_length: 1
  model_num_per_trainer: 1
  model_type: google/bert_uncased_L-2_H-128_A-2
  n_best_size: 20
  no_repeat_ngram_size: 3
  null_score_diff_threshold: 0.0
  num_beams: 5
  num_item: 0
  num_labels: 1   # number of labels
  num_of_trees: 10
  num_user: 0
  out_channels: 62  # output dimension (62 classes for FEMNIST)
  pretrain_tasks: []
  stage: 
  task: node  
  type: convnet2   # model architecture
  use_bias: True  # whether to use bias terms
  use_contrastive_loss: False
nbafl:
  use: False
outdir: exp\FedAvg_convnet2_on_femnist_lr0.01_lstep1\sub_exp_20230930152640
personalization:
  K: 5
  beta: 1.0
  epoch_feature: 1
  epoch_linear: 2
  local_param: []
  local_update_steps: 1
  lr: 0.01
  lr_feature: 0.1
  lr_linear: 0.1
  regular_weight: 0.1
  share_non_trainable_para: False
  weight_decay: 0.0
print_decimal_digits: 6
quantization:
  method: none
  nbits: 8
regularizer:
  mu: 0.0
  type: 
seed: 12345   # random seed
sgdmf:
  use: False
# Training-related configuration
train:
  batch_or_epoch: epoch
  data_para_dids: []
  local_update_steps: 1
  optimizer:   # client-side optimizer; see the PyTorch sketch after the config
    lr: 0.01
    type: SGD
    weight_decay: 0.0
  scheduler:
    type: 
    warmup_ratio: 0.0
trainer:
  disp_freq: 50
  local_entropy:
    alpha: 0.75
    eps: 0.0001
    gamma: 0.03
    inc_factor: 1.0
  sam:
    adaptive: False
    eta: 0.0
    rho: 1.0
  type: cvtrainer
  val_freq: 100000000
use_gpu: True
verbose: 1
vertical:
  use: False
wandb:
  use: False
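The data.transform entry above is a list of [name, kwargs] pairs that FederatedScope resolves to the corresponding torchvision transforms for image data. Below is a minimal sketch of the equivalent preprocessing in plain torchvision, reusing the FEMNIST mean/std from the config; it is an illustration, not FederatedScope's internal code.

# Hypothetical torchvision equivalent of the data.transform entry above.
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.ToTensor(),                              # ['ToTensor', {}]
    transforms.Normalize(mean=[0.9637], std=[0.1592]),  # ['Normalize', {...}]
])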
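The dataloader block maps onto the familiar torch.utils.data.DataLoader arguments. A minimal sketch under that assumption, with a random TensorDataset standing in for the real FEMNIST data:

# Hypothetical PyTorch equivalent of the dataloader block; the random
# dataset is only a stand-in for the real FEMNIST data.
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(100, 1, 28, 28),
                        torch.randint(0, 62, (100,)))
loader = DataLoader(dataset, batch_size=10, shuffle=True, drop_last=False,
                    num_workers=0, pin_memory=False)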
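Likewise, train.optimizer describes the optimizer each client uses for its local updates. A minimal sketch of the corresponding plain-PyTorch call; the Linear model is only a stand-in for convnet2.

# Hypothetical PyTorch equivalent of the train.optimizer entry.
import torch

model = torch.nn.Linear(784, 62)  # stand-in model for illustration
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, weight_decay=0.0)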