BP Neural Network in C (Backpropagation)

```c
/******************************************************************************

                      ====================================================
        Network:      Backpropagation Network with Bias Terms and Momentum
                      ====================================================

        Application:  Time-Series Forecasting
                      Prediction of the Annual Number of Sunspots

        Author:       Karsten Kutza
        Date:         17.4.96

        Reference:    D.E. Rumelhart, G.E. Hinton, R.J. Williams
                      Learning Internal Representations by Error Propagation
                      in:
                      D.E. Rumelhart, J.L. McClelland (Eds.)
                      Parallel Distributed Processing, Volume 1
                      MIT Press, Cambridge, MA, pp. 318-362, 1986

 ******************************************************************************/




/******************************************************************************
                            D E C L A R A T I O N S
 ******************************************************************************/


#include <stdlib.h>
#include <stdio.h>
#include <math.h>


typedef int           BOOL;
typedef int           INT;
typedef double        REAL;

#define FALSE         0
#define TRUE          1
#define NOT           !
#define AND           &&
#define OR            ||

#define MIN_REAL      -HUGE_VAL
#define MAX_REAL      +HUGE_VAL
#define MIN(x,y)      ((x)<(y) ? (x) : (y))
#define MAX(x,y)      ((x)>(y) ? (x) : (y))

#define LO            0.1
#define HI            0.9
#define BIAS          1

#define sqr(x)        ((x)*(x))


typedef struct {                     /* A LAYER OF A NET:                     */
        INT           Units;         /* - number of units in this layer       */
        REAL*         Output;        /* - output of ith unit                  */
        REAL*         Error;         /* - error term of ith unit              */
        REAL**        Weight;        /* - connection weights to ith unit      */
        REAL**        WeightSave;    /* - saved weights for stopped training  */
        REAL**        dWeight;       /* - last weight deltas for momentum     */
} LAYER;

typedef struct {                     /* A NET:                                */
        LAYER**       Layer;         /* - layers of this net                  */
        LAYER*        InputLayer;    /* - input layer                         */
        LAYER*        OutputLayer;   /* - output layer                        */
        REAL          Alpha;         /* - momentum factor                     */
        REAL          Eta;           /* - learning rate                       */
        REAL          Gain;          /* - gain of sigmoid function            */
        REAL          Error;         /* - total net error                     */
} NET;


/******************************************************************************
        R A N D O M S   D R A W N   F R O M   D I S T R I B U T I O N S
 ******************************************************************************/


void InitializeRandoms()
{
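  /* Fixed seed, so every run produces the same "random" weights and training order */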
  srand(4711);
}


INT RandomEqualINT(INT Low, INT High)
{
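  /* Uniformly distributed integer in [Low, High] (modulo method; the slight bias is acceptable here) */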
  return rand() % (High-Low+1) + Low;
}      


REAL RandomEqualREAL(REAL Low, REAL High)
{
  return ((REAL) rand() / RAND_MAX) * (High-Low) + Low;
}      


/******************************************************************************
               A P P L I C A T I O N - S P E C I F I C   C O D E
 ******************************************************************************/


#define NUM_LAYERS    3
#define N             30
#define M             1
INT                   Units[NUM_LAYERS] = {N, 10, M};
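/* Topology: N=30 inputs (a sliding window of past years), 10 hidden units, M=1 output (next year) */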

#define FIRST_YEAR    1700
#define NUM_YEARS     280

#define TRAIN_LWB     (N)
#define TRAIN_UPB     (179)
#define TRAIN_YEARS   (TRAIN_UPB - TRAIN_LWB + 1)
#define TEST_LWB      (180)
#define TEST_UPB      (259)
#define TEST_YEARS    (TEST_UPB - TEST_LWB + 1)
#define EVAL_LWB      (260)
#define EVAL_UPB      (NUM_YEARS - 1)
#define EVAL_YEARS    (EVAL_UPB - EVAL_LWB + 1)

REAL                  Sunspots_[NUM_YEARS];
REAL                  Sunspots [NUM_YEARS] = {

                        0.0262,  0.0575,  0.0837,  0.1203,  0.1883,  0.3033,  
                        0.1517,  0.1046,  0.0523,  0.0418,  0.0157,  0.0000,  
                        0.0000,  0.0105,  0.0575,  0.1412,  0.2458,  0.3295,  
                        0.3138,  0.2040,  0.1464,  0.1360,  0.1151,  0.0575,  
                        0.1098,  0.2092,  0.4079,  0.6381,  0.5387,  0.3818,  
                        0.2458,  0.1831,  0.0575,  0.0262,  0.0837,  0.1778,  
                        0.3661,  0.4236,  0.5805,  0.5282,  0.3818,  0.2092,  
                        0.1046,  0.0837,  0.0262,  0.0575,  0.1151,  0.2092,  
                        0.3138,  0.4231,  0.4362,  0.2495,  0.2500,  0.1606,  
                        0.0638,  0.0502,  0.0534,  0.1700,  0.2489,  0.2824,  
                        0.3290,  0.4493,  0.3201,  0.2359,  0.1904,  0.1093,  
                        0.0596,  0.1977,  0.3651,  0.5549,  0.5272,  0.4268,  
                        0.3478,  0.1820,  0.1600,  0.0366,  0.1036,  0.4838,  
                        0.8075,  0.6585,  0.4435,  0.3562,  0.2014,  0.1192,  
                        0.0534,  0.1260,  0.4336,  0.6904,  0.6846,  0.6177,  
                        0.4702,  0.3483,  0.3138,  0.2453,  0.2144,  0.1114,  
                        0.0837,  0.0335,  0.0214,  0.0356,  0.0758,  0.1778,  
                        0.2354,  0.2254,  0.2484,  0.2207,  0.1470,  0.0528,  
                        0.0424,  0.0131,  0.0000,  0.0073,  0.0262,  0.0638,  
                        0.0727,  0.1851,  0.2395,  0.2150,  0.1574,  0.1250,  
                        0.0816,  0.0345,  0.0209,  0.0094,  0.0445,  0.0868,  
                        0.1898,  0.2594,  0.3358,  0.3504,  0.3708,  0.2500,  
                        0.1438,  0.0445,  0.0690,  0.2976,  0.6354,  0.7233,  
                        0.5397,  0.4482,  0.3379,  0.1919,  0.1266,  0.0560,  
                        0.0785,  0.2097,  0.3216,  0.5152,  0.6522,  0.5036,  
                        0.3483,  0.3373,  0.2829,  0.2040,  0.1077,  0.0350,  
                        0.0225,  0.1187,  0.2866,  0.4906,  0.5010,  0.4038,  
                        0.3091,  0.2301,  0.2458,  0.1595,  0.0853,  0.0382,  
                        0.1966,  0.3870,  0.7270,  0.5816,  0.5314,  0.3462,  
                        0.2338,  0.0889,  0.0591,  0.0649,  0.0178,  0.0314,  
                        0.1689,  0.2840,  0.3122,  0.3332,  0.3321,  0.2730,  
                        0.1328,  0.0685,  0.0356,  0.0330,  0.0371,  0.1862,  
                        0.3818,  0.4451,  0.4079,  0.3347,  0.2186,  0.1370,  
                        0.1396,  0.0633,  0.0497,  0.0141,  0.0262,  0.1276,  
                        0.2197,  0.3321,  0.2814,  0.3243,  0.2537,  0.2296,  
                        0.0973,  0.0298,  0.0188,  0.0073,  0.0502,  0.2479,  
                        0.2986,  0.5434,  0.4215,  0.3326,  0.1966,  0.1365,  
                        0.0743,  0.0303,  0.0873,  0.2317,  0.3342,  0.3609,  
                        0.4069,  0.3394,  0.1867,  0.1109,  0.0581,  0.0298,  
                        0.0455,  0.1888,  0.4168,  0.5983,  0.5732,  0.4644,  
                        0.3546,  0.2484,  0.1600,  0.0853,  0.0502,  0.1736,  
                        0.4843,  0.7929,  0.7128,  0.7045,  0.4388,  0.3630,  
                        0.1647,  0.0727,  0.0230,  0.1987,  0.7411,  0.9947,  
                        0.9665,  0.8316,  0.5873,  0.2819,  0.1961,  0.1459,  
                        0.0534,  0.0790,  0.2458,  0.4906,  0.5539,  0.5518,  
                        0.5465,  0.3483,  0.3603,  0.1987,  0.1804,  0.0811,  
                        0.0659,  0.1428,  0.4838,  0.8127 

                      };

REAL                  Mean;
REAL                  TrainError;
REAL                  TrainErrorPredictingMean;
REAL                  TestError;
REAL                  TestErrorPredictingMean;

FILE*                 f;


void NormalizeSunspots()
{
  INT  Year;
  REAL Min, Max;
	
  Min = MAX_REAL;
  Max = MIN_REAL;
  for (Year=0; Year<NUM_YEARS; Year++) {
    Min = MIN(Min, Sunspots[Year]);
    Max = MAX(Max, Sunspots[Year]);
  }
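  /* Rescale the series into [LO, HI] = [0.1, 0.9] so targets stay inside the sigmoid's output range */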
  Mean = 0;
  for (Year=0; Year<NUM_YEARS; Year++) {
    Sunspots_[Year] = 
    Sunspots [Year] = ((Sunspots[Year]-Min) / (Max-Min)) * (HI-LO) + LO;
    Mean += Sunspots[Year] / NUM_YEARS;
  }
}


void InitializeApplication(NET* Net)
{
  INT  Year, i;
  REAL Out, Err;

  Net->Alpha = 0.5;
  Net->Eta   = 0.05;
  Net->Gain  = 1;

  NormalizeSunspots();
  TrainErrorPredictingMean = 0;
  for (Year=TRAIN_LWB; Year<=TRAIN_UPB; Year++) {
    for (i=0; i<M; i++) {
      Out = Sunspots[Year+i];
      Err = Mean - Out;
      TrainErrorPredictingMean += 0.5 * sqr(Err);
    }
  }
  TestErrorPredictingMean = 0;
  for (Year=TEST_LWB; Year<=TEST_UPB; Year++) {
    for (i=0; i<M; i++) {
      Out = Sunspots[Year+i];
      Err = Mean - Out;
      TestErrorPredictingMean += 0.5 * sqr(Err);
    }
  }
  f = fopen("BPN.txt", "w");
}


void FinalizeApplication(NET* Net)
{
  fclose(f);
}


/******************************************************************************
                          I N I T I A L I Z A T I O N
 ******************************************************************************/


void GenerateNetwork(NET* Net)
{
  INT l,i;

  Net->Layer = (LAYER**) calloc(NUM_LAYERS, sizeof(LAYER*));
   
  for (l=0; l<NUM_LAYERS; l++) {
    Net->Layer[l] = (LAYER*) malloc(sizeof(LAYER));
      
    Net->Layer[l]->Units      = Units[l];
    Net->Layer[l]->Output     = (REAL*)  calloc(Units[l]+1, sizeof(REAL));
    Net->Layer[l]->Error      = (REAL*)  calloc(Units[l]+1, sizeof(REAL));
    Net->Layer[l]->Weight     = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
    Net->Layer[l]->WeightSave = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
    Net->Layer[l]->dWeight    = (REAL**) calloc(Units[l]+1, sizeof(REAL*));
    Net->Layer[l]->Output[0]  = BIAS;
      
    if (l != 0) {
      for (i=1; i<=Units[l]; i++) {
        Net->Layer[l]->Weight[i]     = (REAL*) calloc(Units[l-1]+1, sizeof(REAL));
        Net->Layer[l]->WeightSave[i] = (REAL*) calloc(Units[l-1]+1, sizeof(REAL));
        Net->Layer[l]->dWeight[i]    = (REAL*) calloc(Units[l-1]+1, sizeof(REAL));
      }
    }
  }
  Net->InputLayer  = Net->Layer[0];
  Net->OutputLayer = Net->Layer[NUM_LAYERS - 1];
  Net->Alpha       = 0.9;
  Net->Eta         = 0.25;
  Net->Gain        = 1;
}


void RandomWeights(NET* Net)
{
  INT l,i,j;
   
  for (l=1; l<NUM_LAYERS; l++) {
    for (i=1; i<=Net->Layer[l]->Units; i++) {
      for (j=0; j<=Net->Layer[l-1]->Units; j++) {
        Net->Layer[l]->Weight[i][j] = RandomEqualREAL(-0.5, 0.5);
      }
    }
  }
}


void SetInput(NET* Net, REAL* Input)
{
  INT i;
   
  for (i=1; i<=Net->InputLayer->Units; i++) {
    Net->InputLayer->Output[i] = Input[i-1];
  }
}


void GetOutput(NET* Net, REAL* Output)
{
  INT i;
   
  for (i=1; i<=Net->OutputLayer->Units; i++) {
    Output[i-1] = Net->OutputLayer->Output[i];
  }
}


/******************************************************************************
            S U P P O R T   F O R   S T O P P E D   T R A I N I N G
 ******************************************************************************/


void SaveWeights(NET* Net)
{
  INT l,i,j;

  for (l=1; l<NUM_LAYERS; l++) {
    for (i=1; i<=Net->Layer[l]->Units; i++) {
      for (j=0; j<=Net->Layer[l-1]->Units; j++) {
        Net->Layer[l]->WeightSave[i][j] = Net->Layer[l]->Weight[i][j];
      }
    }
  }
}


void RestoreWeights(NET* Net)
{
  INT l,i,j;

  for (l=1; l<NUM_LAYERS; l++) {
    for (i=1; i<=Net->Layer[l]->Units; i++) {
      for (j=0; j<=Net->Layer[l-1]->Units; j++) {
        Net->Layer[l]->Weight[i][j] = Net->Layer[l]->WeightSave[i][j];
      }
    }
  }
}


/******************************************************************************
                     P R O P A G A T I N G   S I G N A L S
 ******************************************************************************/


void PropagateLayer(NET* Net, LAYER* Lower, LAYER* Upper)
{
  INT  i,j;
  REAL Sum;

  for (i=1; i<=Upper->Units; i++) {
    Sum = 0;
    for (j=0; j<=Lower->Units; j++) {
      Sum += Upper->Weight[i][j] * Lower->Output[j];
    }
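    /* Logistic activation: squashes the weighted sum into (0,1) */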
    Upper->Output[i] = 1 / (1 + exp(-Net->Gain * Sum));
  }
}


void PropagateNet(NET* Net)
{
  INT l;
   
  for (l=0; l<NUM_LAYERS-1; l++) {
    PropagateLayer(Net, Net->Layer[l], Net->Layer[l+1]);
  }
}


/******************************************************************************
                  B A C K P R O P A G A T I N G   E R R O R S
 ******************************************************************************/


void ComputeOutputError(NET* Net, REAL* Target)
{
  INT  i;
  REAL Out, Err;
   
  Net->Error = 0;
  for (i=1; i<=Net->OutputLayer->Units; i++) {
    Out = Net->OutputLayer->Output[i];
    Err = Target[i-1]-Out;
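    /* Out*(1-Out) is the sigmoid derivative, so Error[i] holds the output-layer delta */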
    Net->OutputLayer->Error[i] = Net->Gain * Out * (1-Out) * Err;
    Net->Error += 0.5 * sqr(Err);
  }
}


void BackpropagateLayer(NET* Net, LAYER* Upper, LAYER* Lower)
{
  INT  i,j;
  REAL Out, Err;
   
  for (i=1; i<=Lower->Units; i++) {
    Out = Lower->Output[i];
    Err = 0;
    for (j=1; j<=Upper->Units; j++) {
      Err += Upper->Weight[j][i] * Upper->Error[j];
    }
    Lower->Error[i] = Net->Gain * Out * (1-Out) * Err;
  }
}


void BackpropagateNet(NET* Net)
{
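  /* Propagate deltas from the output layer back; layer 0 is the input layer and gets no error term */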
  INT l;
   
  for (l=NUM_LAYERS-1; l>1; l--) {
    BackpropagateLayer(Net, Net->Layer[l], Net->Layer[l-1]);
  }
}


void AdjustWeights(NET* Net)
{
  INT  l,i,j;
  REAL Out, Err, dWeight;
   
  for (l=1; l<NUM_LAYERS; l++) {
    for (i=1; i<=Net->Layer[l]->Units; i++) {
      for (j=0; j<=Net->Layer[l-1]->Units; j++) {
        Out = Net->Layer[l-1]->Output[j];
        Err = Net->Layer[l]->Error[i];
        dWeight = Net->Layer[l]->dWeight[i][j];
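        /* Gradient step plus momentum: Alpha times the previous step smooths the updates */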
        Net->Layer[l]->Weight[i][j] += Net->Eta * Err * Out + Net->Alpha * dWeight;
        Net->Layer[l]->dWeight[i][j] = Net->Eta * Err * Out;
      }
    }
  }
}


/******************************************************************************
                      S I M U L A T I N G   T H E   N E T
 ******************************************************************************/


void SimulateNet(NET* Net, REAL* Input, REAL* Output, REAL* Target, BOOL Training)
{
  SetInput(Net, Input);
  PropagateNet(Net);
  GetOutput(Net, Output);
   
  ComputeOutputError(Net, Target);
  if (Training) {
    BackpropagateNet(Net);
    AdjustWeights(Net);
  }
}


void TrainNet(NET* Net, INT Epochs)
{
  INT  Year, n;
  REAL Output[M];

  for (n=0; n<Epochs*TRAIN_YEARS; n++) {
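    /* Stochastic training: draw a random year and present its N-year history as input */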
    Year = RandomEqualINT(TRAIN_LWB, TRAIN_UPB);
    SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), TRUE);
  }
}


void TestNet(NET* Net)
{
  INT  Year;
  REAL Output[M];

  TrainError = 0;
  for (Year=TRAIN_LWB; Year<=TRAIN_UPB; Year++) {
    SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), FALSE);
    TrainError += Net->Error;
  }
  TestError = 0;
  for (Year=TEST_LWB; Year<=TEST_UPB; Year++) {
    SimulateNet(Net, &(Sunspots[Year-N]), Output, &(Sunspots[Year]), FALSE);
    TestError += Net->Error;
  }
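  /* NMSE: accumulated squared error normalized by the error of always predicting the mean */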
  fprintf(f, "\nNMSE is %0.3f on Training Set and %0.3f on Test Set",
             TrainError / TrainErrorPredictingMean,
             TestError / TestErrorPredictingMean);
}


void EvaluateNet(NET* Net)
{
  INT  Year;
  REAL Output [M];
  REAL Output_[M];

  fprintf(f, "\n\n\n");
  fprintf(f, "Year    Sunspots    Open-Loop Prediction    Closed-Loop Prediction\n");
  fprintf(f, "\n");
  for (Year=EVAL_LWB; Year<=EVAL_UPB; Year++) {
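    /* Open-loop uses the true history as input; closed-loop feeds back the net's own predictions */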
    SimulateNet(Net, &(Sunspots [Year-N]), Output,  &(Sunspots [Year]), FALSE);
    SimulateNet(Net, &(Sunspots_[Year-N]), Output_, &(Sunspots_[Year]), FALSE);
    Sunspots_[Year] = Output_[0];
    fprintf(f, "%d       %0.3f                   %0.3f                     %0.3f\n",
               FIRST_YEAR + Year,
               Sunspots[Year],
               Output [0],
               Output_[0]);
  }
}


/******************************************************************************
                                    M A I N
 ******************************************************************************/


int main(void)
{
  NET  Net;
  BOOL Stop;
  REAL MinTestError;

  InitializeRandoms();
  GenerateNetwork(&Net);
  RandomWeights(&Net);
  InitializeApplication(&Net);

  Stop = FALSE;
  MinTestError = MAX_REAL;
  do {
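    /* Early stopping: keep the best test-set weights; stop once the test error degrades by 20% */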
    TrainNet(&Net, 10);
    TestNet(&Net);
    if (TestError < MinTestError) {
      fprintf(f, " - saving Weights ...");
      MinTestError = TestError;
      SaveWeights(&Net);
    }
    else if (TestError > 1.2 * MinTestError) {
      fprintf(f, " - stopping Training and restoring Weights ...");
      Stop = TRUE;
      RestoreWeights(&Net);
    }
  } while (NOT Stop);

  TestNet(&Net);
  EvaluateNet(&Net);
   
  FinalizeApplication(&Net);

  return 0;
}
```
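To build the listing above (assuming it is saved as `bpn.c`; the filename is illustrative), link the math library for `exp()`, e.g. `cc -o bpn bpn.c -lm`. The program writes its training log, test errors, and year-by-year predictions to `BPN.txt`.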

An artificial neural network is a computational model that imitates the working principles of the human brain's nervous system and is used to simulate intelligent behavior and thought processes. It consists of neurons and the connections between them; by adjusting the weights of those connections, it processes input signals and predicts output values.

BP (backpropagation) is one of the most commonly used training algorithms for artificial neural networks. It propagates the error between the network's actual output and the expected output backwards through the network, adjusting the connection weights so as to reduce that error. The algorithm consists of two phases: forward propagation and backward propagation.

Taking C as an example, here is a simple implementation of the BP algorithm:

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INPUT_NUM        2
#define HIDDEN_NUM       2
#define OUTPUT_NUM       1
#define LEARNING_RATE    0.5
#define MAX_ITERATIONS   10000
#define ERROR_THRESHOLD  0.0001

/* Network structure */
typedef struct {
    double input[INPUT_NUM];                        /* input layer */
    double hidden[HIDDEN_NUM];                      /* hidden layer */
    double output[OUTPUT_NUM];                      /* output layer */
    double hidden_weights[HIDDEN_NUM][INPUT_NUM];   /* input-to-hidden weights */
    double output_weights[OUTPUT_NUM][HIDDEN_NUM];  /* hidden-to-output weights */
} NeuralNetwork;

/* Sigmoid activation function */
double sigmoid(double x)
{
    return 1 / (1 + exp(-x));
}

/* Derivative of the sigmoid, expressed in terms of its output o = sigmoid(x):
   d/dx sigmoid(x) = o * (1 - o) */
double sigmoid_derivative(double o)
{
    return o * (1 - o);
}

/* Forward pass */
void forward(NeuralNetwork *nn)
{
    /* hidden layer */
    for (int i = 0; i < HIDDEN_NUM; i++) {
        double sum = 0.0;
        for (int j = 0; j < INPUT_NUM; j++) {
            sum += nn->input[j] * nn->hidden_weights[i][j];
        }
        nn->hidden[i] = sigmoid(sum);
    }
    /* output layer */
    for (int i = 0; i < OUTPUT_NUM; i++) {
        double sum = 0.0;
        for (int j = 0; j < HIDDEN_NUM; j++) {
            sum += nn->hidden[j] * nn->output_weights[i][j];
        }
        nn->output[i] = sigmoid(sum);
    }
}

/* Backward pass */
void backward(NeuralNetwork *nn, double target_value)
{
    /* output-layer deltas */
    double output_error[OUTPUT_NUM];
    for (int i = 0; i < OUTPUT_NUM; i++) {
        output_error[i] = (target_value - nn->output[i]) * sigmoid_derivative(nn->output[i]);
    }
    /* hidden-layer deltas */
    double hidden_error[HIDDEN_NUM];
    for (int i = 0; i < HIDDEN_NUM; i++) {
        double sum = 0.0;
        for (int j = 0; j < OUTPUT_NUM; j++) {
            sum += output_error[j] * nn->output_weights[j][i];
        }
        hidden_error[i] = sum * sigmoid_derivative(nn->hidden[i]);
    }
    /* update output-layer weights */
    for (int i = 0; i < OUTPUT_NUM; i++) {
        for (int j = 0; j < HIDDEN_NUM; j++) {
            nn->output_weights[i][j] += LEARNING_RATE * output_error[i] * nn->hidden[j];
        }
    }
    /* update hidden-layer weights */
    for (int i = 0; i < HIDDEN_NUM; i++) {
        for (int j = 0; j < INPUT_NUM; j++) {
            nn->hidden_weights[i][j] += LEARNING_RATE * hidden_error[i] * nn->input[j];
        }
    }
}

/* Train on one example: forward pass followed by backward pass */
void train(NeuralNetwork *nn, double input1, double input2, double target_value)
{
    nn->input[0] = input1;
    nn->input[1] = input2;
    forward(nn);
    backward(nn, target_value);
}

int main(void)
{
    NeuralNetwork nn;
    double input1, input2, target_value;
    int iterations = 0;

    /* initialize the weights with random values in [0, 1) */
    for (int i = 0; i < HIDDEN_NUM; i++) {
        for (int j = 0; j < INPUT_NUM; j++) {
            nn.hidden_weights[i][j] = (double)rand() / RAND_MAX;
        }
    }
    for (int i = 0; i < OUTPUT_NUM; i++) {
        for (int j = 0; j < HIDDEN_NUM; j++) {
            nn.output_weights[i][j] = (double)rand() / RAND_MAX;
        }
    }

    /* train the network; the target is scaled into (0, 1) because a
       sigmoid output can never exceed 1 */
    do {
        input1 = (double)rand() / RAND_MAX;
        input2 = (double)rand() / RAND_MAX;
        target_value = (input1 + input2) / 2.0;
        train(&nn, input1, input2, target_value);
        iterations++;
    } while (iterations < MAX_ITERATIONS &&
             fabs(nn.output[0] - target_value) > ERROR_THRESHOLD);

    /* print the result of the last training example */
    printf("Training result:\n");
    printf("Input 1: %.2f, Input 2: %.2f, Target: %.2f, Output: %.2f\n",
           input1, input2, target_value, nn.output[0]);

    return 0;
}
```

The example above defines a network with 2 input neurons, 2 hidden neurons, and 1 output neuron, randomly initializes the weights and input data, trains the network, and finally prints the result. Note that this is only a minimal implementation; practical applications will usually require further optimization and refinement.
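For reference, the weight update that both listings implement is the classic delta rule; the Kutza program above additionally applies a momentum term. In the notation of that program:

$$
w_{ij} \leftarrow w_{ij} + \eta\,\delta_i\,o_j + \alpha\,\Delta w_{ij}^{\mathrm{prev}},
\qquad
\delta_i = g\,o_i\,(1 - o_i)\,e_i
$$

where $\eta$ is the learning rate (`Eta`), $\alpha$ the momentum factor (`Alpha`), $g$ the sigmoid gain (`Gain`), $o_i(1-o_i)$ the sigmoid derivative, $e_i$ the error reaching unit $i$ (the target difference at the output layer, the weighted sum of the upper layer's deltas at hidden layers), and $\Delta w_{ij}^{\mathrm{prev}} = \eta\,\delta_i\,o_j$ from the previous update, which is exactly what the `dWeight` arrays store.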