Credit Scorecard: Days 10-12

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
import seaborn as sns
from scipy import stats
import copy

from sklearn.model_selection import train_test_split
 
train_data = pd.read_csv('data/ScorecardsData.csv')
train_data = train_data.iloc[:,1:]   # drop the unnamed index column
train_data.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 150000 entries, 0 to 149999
Data columns (total 11 columns):
 #   Column                                Non-Null Count   Dtype  
---  ------                                --------------   -----  
 0   SeriousDlqin2yrs                      150000 non-null  int64  
 1   RevolvingUtilizationOfUnsecuredLines  150000 non-null  float64
 2   age                                   150000 non-null  int64  
 3   NumberOfTime30-59DaysPastDueNotWorse  150000 non-null  int64  
 4   DebtRatio                             150000 non-null  float64
 5   MonthlyIncome                         120268 non-null  float64
 6   NumberOfOpenCreditLinesAndLoans       150000 non-null  int64  
 7   NumberOfTimes90DaysLate               150000 non-null  int64  
 8   NumberRealEstateLoansOrLines          150000 non-null  int64  
 9   NumberOfTime60-89DaysPastDueNotWorse  150000 non-null  int64  
 10  NumberOfDependents                    146076 non-null  float64
dtypes: float64(4), int64(7)
memory usage: 12.6 MB
train_box = train_data.iloc[:,[3,7,9]]   # the three past-due count columns
train_box.boxplot()
<AxesSubplot:>

(Figure: boxplot of NumberOfTime30-59DaysPastDueNotWorse, NumberOfTimes90DaysLate and NumberOfTime60-89DaysPastDueNotWorse)

train, test = train_test_split(train_data, test_size=0.2, random_state=100)
# Alternative: impute missing MonthlyIncome with a random forest regressor
# (kept for reference; a cleaned-up sketch follows after this cell).
# mData = train.iloc[:,[5,0,1,2,3,4,6,7,8,9]]
# train_d=mData.values

# train_known = mData[mData.MonthlyIncome.notnull()].values
# train_unknown = mData[mData.MonthlyIncome.isnull()].values
# train_X = train_known[:,1:]
# train_y = train_known[:,0]

# rfr = RandomForestRegressor(random_state=0,n_estimators=200,max_depth=3,n_jobs=-1)
# rfr.fit(train_X,train_y)
# predicted_y = rfr.predict(train_unknown[:,1:]).round(0)
# train.loc[train.MonthlyIncome.isnull(),'MonthlyIncome'] = predicted_y

train = train.dropna()            # instead, simply drop rows with missing values
train = train.drop_duplicates()   # and drop duplicate rows

train['SeriousDlqin2yrs'] = 1-train['SeriousDlqin2yrs']            # flip the target so that 1 = good customer
train = train[train['NumberOfTime30-59DaysPastDueNotWorse']<90]    # drop the extreme values (>= 90) seen in the boxplot
train = train[train.age>0]                                         # keep only positive ages
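
For reference, here is a cleaned-up, self-contained version of the random-forest imputation that is commented out above (a sketch only, assuming the same column order as the original mData selection; the pipeline below proceeds with dropna() instead):

def impute_monthly_income(df):
    # Sketch of the commented-out approach: predict missing MonthlyIncome from
    # the other columns (NumberOfDependents excluded, as in the original iloc
    # selection) and write the rounded predictions back.
    cols = ['MonthlyIncome', 'SeriousDlqin2yrs', 'RevolvingUtilizationOfUnsecuredLines',
            'age', 'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio',
            'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
            'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse']
    mData = df[cols]
    known = mData[mData.MonthlyIncome.notnull()].values
    unknown = mData[mData.MonthlyIncome.isnull()].values
    rfr = RandomForestRegressor(random_state=0, n_estimators=200, max_depth=3, n_jobs=-1)
    rfr.fit(known[:, 1:], known[:, 0])
    df.loc[df.MonthlyIncome.isnull(), 'MonthlyIncome'] = rfr.predict(unknown[:, 1:]).round(0)
    return df

# train = impute_monthly_income(train)   # would be run before dropping missing values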



age = train['age']
sns.distplot(age)   # deprecated in newer seaborn; sns.histplot(age, kde=True) is the modern equivalent
<AxesSubplot:xlabel='age'>

(Figure: distribution of age)

train_y = train.iloc[:,0]    # target (1 = good customer after the flip above)
train_X = train.iloc[:,1:]   # features
test = test.dropna()
test = test.drop_duplicates()
test.info()
test['SeriousDlqin2yrs'] = 1-test['SeriousDlqin2yrs']   # flip the test target the same way
test_y = test.iloc[:,0]
test_X = test.iloc[:,1:]

informationValue = []   # will collect each variable's IV
cut = []                # placeholder for cut points (unused below)
<class 'pandas.core.frame.DataFrame'>
Int64Index: 24034 entries, 149311 to 6008
Data columns (total 11 columns):
 #   Column                                Non-Null Count  Dtype  
---  ------                                --------------  -----  
 0   SeriousDlqin2yrs                      24034 non-null  int64  
 1   RevolvingUtilizationOfUnsecuredLines  24034 non-null  float64
 2   age                                   24034 non-null  int64  
 3   NumberOfTime30-59DaysPastDueNotWorse  24034 non-null  int64  
 4   DebtRatio                             24034 non-null  float64
 5   MonthlyIncome                         24034 non-null  float64
 6   NumberOfOpenCreditLinesAndLoans       24034 non-null  int64  
 7   NumberOfTimes90DaysLate               24034 non-null  int64  
 8   NumberRealEstateLoansOrLines          24034 non-null  int64  
 9   NumberOfTime60-89DaysPastDueNotWorse  24034 non-null  int64  
 10  NumberOfDependents                    24034 non-null  float64
dtypes: float64(4), int64(7)
memory usage: 2.2 MB


from sklearn.tree import DecisionTreeClassifier
def optimal_binning_boundary(x, y):
    '''
    Use a decision tree to find optimal bin boundaries: the thresholds of the
    tree's internal split nodes are taken as the bin edges.
    '''
    boundary = []  # list of bin boundaries to return

    x = x.fillna(-1).values  # fill missing values
    y = y.values

    clf = DecisionTreeClassifier(criterion='entropy',   # split by minimizing information entropy
                                 max_leaf_nodes=6,      # at most 6 leaves (bins)
                                 min_samples_leaf=0.05) # each leaf holds at least 5% of the samples

    clf.fit(x.reshape(-1,1), y)  # fit the tree on the single feature

    # from sklearn import tree
    # tree.plot_tree(clf)  # optionally plot the tree structure
    # plt.show()

    n_nodes = clf.tree_.node_count            # number of nodes in the tree
    children_left = clf.tree_.children_left   # children_left[i]: left child of node i
    children_right = clf.tree_.children_right # children_right[i]: right child of node i
    threshold = clf.tree_.threshold           # threshold[i]: split threshold at node i

    for i in range(n_nodes):
        if children_left[i] != children_right[i]:  # internal (non-leaf) node
            boundary.append(threshold[i])

    boundary.sort()

    min_x = x.min()
    max_x = x.max() + 0.1  # +0.1 so the later cut/groupby includes the sample with the maximum value
    boundary = [min_x] + boundary + [max_x]

    return boundary
def feature_woe_iv(x, y):
    '''
    Compute the WOE and IV of each bin of a variable; returns (result DataFrame, list of WOE values).
    '''
    boundary = optimal_binning_boundary(x, y)  # optimal bin boundaries
    df = pd.concat([x, y], axis=1)             # combine x and y into one DataFrame
    df.columns = ['x', 'y']                    # rename feature and target columns
    df['bins'] = pd.cut(x=x, bins=boundary, right=False)  # bin each x value;
                                                          # right=False makes the right edge open
    grouped = df.groupby('bins')['y']          # count good / bad / total customers per bin
    result_df = grouped.agg([('good', lambda y: (y == 1).sum()),
                             ('bad', lambda y: (y == 0).sum()),
                             ('total', 'count')])

    result_df['good_pct'] = result_df['good'] / result_df['good'].sum()     # share of good customers
    result_df['bad_pct'] = result_df['bad'] / result_df['bad'].sum()        # share of bad customers
    result_df['total_pct'] = result_df['total'] / result_df['total'].sum()  # share of all customers

    result_df['bad_rate'] = result_df['bad'] / result_df['total']  # bad rate within the bin

    result_df['woe'] = np.log(result_df['good_pct'] / result_df['bad_pct'])              # WOE
    result_df['iv'] = (result_df['good_pct'] - result_df['bad_pct']) * result_df['woe']  # IV
    woe = list(result_df['woe'].round(3))
    print(f"IV of this variable = {result_df['iv'].sum()}")
    iv = result_df['iv'].sum()
    print(informationValue)   # IVs collected so far (before appending this one)
    informationValue.append(iv)

#     print(informationValue)
    return result_df, woe

    # boundary = optimal_binning_boundary(x=data['RevolvingUtilizationOfUnsecuredLines'],
    #                                     y=data['SeriousDlqin2yrs'])
    # print(boundary)
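
For reference, the quantities computed above follow the standard WOE/IV definitions: for bin i,

    woe_i = ln(good_pct_i / bad_pct_i)
    IV    = sum over i of (good_pct_i - bad_pct_i) * woe_i

so a bin with a higher share of good customers than bad customers gets a positive WOE, and IV adds up each bin's contribution into a single measure of the variable's predictive strength.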
result_df,x1_woe= feature_woe_iv(x=train_X['RevolvingUtilizationOfUnsecuredLines'],
                               y=train_y)
result_df

IV of this variable = 1.009962126764656
[]

bins                  good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 2.52e-05)       6034   217   6251  0.067479  0.032779   0.065087  0.034714  0.722008  0.025053
[2.52e-05, 0.142)    37270   704  37974  0.416792  0.106344   0.395394  0.018539  1.365905  0.424043
[0.142, 0.314)       13946   537  14483  0.155959  0.081118   0.150800  0.037078  0.653690  0.048923
[0.314, 0.501)        9424   587  10011  0.105389  0.088671   0.104237  0.058636  0.172730  0.002888
[0.501, 0.86)        11838  1497  13335  0.132385  0.226133   0.138847  0.112261 -0.535409  0.050193
[0.86, 29110.1)      10909  3078  13987  0.121996  0.464955   0.145636  0.220061 -1.337952  0.458862
result_df,x2_woe = feature_woe_iv(x=train_X['age'],
                               y=train_y)
result_df
IV of this variable = 0.19925987486906607
[1.009962126764656]

bins             good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[21.0, 33.5)    10302  1250  11552  0.115208  0.188822   0.120282  0.108206 -0.494066  0.036370
[33.5, 42.5)    15065  1540  16605  0.168473  0.232628   0.172895  0.092743 -0.322668  0.020701
[42.5, 56.5)    30800  2526  33326  0.344438  0.381571   0.346998  0.075797 -0.102383  0.003802
[56.5, 63.5)    14276   745  15021  0.159649  0.112538   0.156402  0.049597  0.349691  0.016474
[63.5, 67.5)     6080   224   6304  0.067993  0.033837   0.065639  0.035533  0.697854  0.023836
[67.5, 103.1)   12898   335  13233  0.144239  0.050604   0.137785  0.025315  1.047437  0.098077
result_df,x3_woe = feature_woe_iv(x=train_X['NumberOfTime30-59DaysPastDueNotWorse'],
                               y=train_y)
result_df
IV of this variable = 0.6885535264867229
[1.009962126764656, 0.19925987486906607]

bins           good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 0.5)    76523  3344  79867  0.855761  0.505136   0.831593  0.041870  0.527163  0.184837
[0.5, 1.5)     9271  1613  10884  0.103678  0.243656   0.113327  0.148199 -0.854465  0.119606
[1.5, 13.1)    3627  1663   5290  0.040561  0.251208   0.055081  0.314367 -1.823478  0.384111
result_df,x4_woe = feature_woe_iv(x=train_X['DebtRatio'],
                               y=train_y)
result_df

IV of this variable = 0.0893992422284528
[1.009962126764656, 0.19925987486906607, 0.6885535264867229]

bins                 good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 0.0163)        8128   353   8481  0.090896  0.053323   0.088306  0.041622  0.533342  0.020039
[0.0163, 0.201)     22511  1572  24083  0.251742  0.237462   0.250757  0.065274  0.058395  0.000834
[0.201, 0.386)      27183  1569  28752  0.303989  0.237009   0.299372  0.054570  0.248893  0.016671
[0.386, 0.505)      11724   880  12604  0.131110  0.132931   0.131236  0.069819 -0.013789  0.000025
[0.505, 0.654)       8032   808   8840  0.089822  0.122054   0.092044  0.091403 -0.306633  0.009883
[0.654, 61106.6)    11843  1438  13281  0.132441  0.217221   0.138285  0.108275 -0.494776  0.041947
result_df,x5_woe = feature_woe_iv(x=train_X['MonthlyIncome'],
                               y=train_y)
result_df

IV of this variable = 0.09696580500519011
[1.009962126764656, 0.19925987486906607, 0.6885535264867229, 0.0893992422284528]

bins                     good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 1302.5)            4518   310   4828  0.050525  0.046828   0.050270  0.064209  0.075992  0.000281
[1302.5, 3332.5)        16217  1795  18012  0.181356  0.271148   0.187545  0.099656 -0.402205  0.036115
[3332.5, 4838.5)        16956  1530  18486  0.189620  0.231118   0.192480  0.082765 -0.197906  0.008213
[4838.5, 6620.5)        17446  1273  18719  0.195100  0.192296   0.194906  0.068006  0.014474  0.000041
[6620.5, 10237.0)       20825  1122  21947  0.232887  0.169486   0.228517  0.051123  0.317781  0.020148
[10237.0, 3008750.1)    13459   590  14049  0.150513  0.089124   0.146281  0.041996  0.524021  0.032169
result_df,x6_woe = feature_woe_iv(x=train_X['NumberOfOpenCreditLinesAndLoans'],
                               y=train_y)
result_df
IV of this variable = 0.05605247509666227
[1.009962126764656, 0.19925987486906607, 0.6885535264867229, 0.0893992422284528, 0.09696580500519011]

bins            good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 2.5)      5952   849   6801  0.066562  0.128248   0.070814  0.124835 -0.655837  0.040456
[2.5, 3.5)      4819   443   5262  0.053891  0.066918   0.054789  0.084189 -0.216508  0.002821
[3.5, 4.5)      6448   496   6944  0.072108  0.074924   0.072302  0.071429 -0.038311  0.000108
[4.5, 5.5)      7563   552   8115  0.084577  0.083384   0.084495  0.068022  0.014215  0.000017
[5.5, 13.5)    50517  3204  53721  0.564934  0.483988   0.559355  0.059641  0.154650  0.012518
[13.5, 58.1)   14122  1076  15198  0.157927  0.162538   0.158245  0.070799 -0.028777  0.000133
result_df,x7_woe = feature_woe_iv(x=train_X['NumberOfTimes90DaysLate'],
                               y=train_y)
result_df
IV of this variable = 0.7636872299659871
[1.009962126764656, 0.19925987486906607, 0.6885535264867229, 0.0893992422284528, 0.09696580500519011, 0.05605247509666227]

bins           good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 0.5)    86365  4461  90826  0.965825  0.673867     0.9457  0.049116  0.359949  0.105090
[0.5, 17.1)    3056  2159   5215  0.034175  0.326133     0.0543  0.413998 -2.255798  0.658597
result_df,x8_woe = feature_woe_iv(x=train_X['NumberRealEstateLoansOrLines'],
                               y=train_y)
result_df
IV of this variable = 0.041023510733383756
[1.009962126764656, 0.19925987486906607, 0.6885535264867229, 0.0893992422284528, 0.09696580500519011, 0.05605247509666227, 0.7636872299659871]

bins           good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 0.5)    31646  2893  34539  0.353899  0.437009   0.359628  0.083760 -0.210942  0.017531
[0.5, 1.5)    31464  1915  33379  0.351864  0.289275   0.347549  0.057371  0.195866  0.012259
[1.5, 2.5)    20136  1237  21373  0.225182  0.186858   0.222540  0.057877  0.186560  0.007150
[2.5, 54.1)    6175   575   6750  0.069055  0.086858   0.070282  0.085185 -0.229366  0.004083
result_df,x9_woe = feature_woe_iv(x=train_X['NumberOfTime60-89DaysPastDueNotWorse'],
                               y=train_y)
result_df
IV of this variable = 0.5139598716969411
[1.009962126764656, 0.19925987486906607, 0.6885535264867229, 0.0893992422284528, 0.09696580500519011, 0.05605247509666227, 0.7636872299659871, 0.041023510733383756]

bins           good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 0.5)    86234  4882  91116   0.96436  0.737462    0.94872  0.053580  0.268249  0.060865
[0.5, 11.1)    3187  1738   4925   0.03564  0.262538    0.05128  0.352893 -1.996915  0.453095
result_df,x10_woe = feature_woe_iv(x=train_X['NumberOfDependents'],
                               y=train_y)
result_df
IV of this variable = 0.033540678987747465
[1.009962126764656, 0.19925987486906607, 0.6885535264867229, 0.0893992422284528, 0.09696580500519011, 0.05605247509666227, 0.7636872299659871, 0.041023510733383756, 0.5139598716969411]

bins           good   bad  total  good_pct   bad_pct  total_pct  bad_rate       woe        iv
[0.0, 0.5)    49009  3068  52077  0.548070  0.463444   0.542237  0.058913  0.167718  0.014193
[0.5, 1.5)    18063  1460  19523  0.202000  0.220544   0.203278  0.074784 -0.087831  0.001629
[1.5, 2.5)    13369  1160  14529  0.149506  0.175227   0.151279  0.079840 -0.158742  0.004083
[2.5, 20.1)    8980   932   9912  0.100424  0.140785   0.103206  0.094027 -0.337838  0.013636
corr = train_data.corr()   # correlation matrix of all 11 columns
xticks = ['x0','x1','x2','x3','x4','x5','x6','x7','x8','x9','x10']   # x0 = target, x1-x10 = the features
yticks = list(corr.index)
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
sns.heatmap(corr, annot=True, cmap='rainbow', ax=ax1, annot_kws={'size': 5,  'color': 'blue'})
ax1.set_xticklabels(xticks, rotation=0, fontsize=10)
ax1.set_yticklabels(yticks, rotation=0, fontsize=10)
plt.show()

(Figure: correlation heatmap of the 11 columns, output_18_0.png)


print(informationValue)
index=['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10']
index_num = range(len(index))
ax=plt.bar(index_num,informationValue,tick_label=index)
plt.show()

[1.009962126764656, 0.19925987486906607, 0.6885535264867229, 0.0893992422284528, 0.09696580500519011, 0.05605247509666227, 0.7636872299659871, 0.041023510733383756, 0.5139598716969411, 0.033540678987747465]

(Figure: bar chart of IV by variable, output_19_1.png)

Select x2, x3, x7 and x9.
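
To make this reproducible, the collected IVs can also be ranked directly (a small sketch; the 0.1 threshold is just a common rule of thumb for "at least medium" predictive power, not something derived above):

iv_series = pd.Series(informationValue, index=index)   # index = ['x1', ..., 'x10'] defined above
print(iv_series.sort_values(ascending=False))          # strongest variables first
print(list(iv_series[iv_series > 0.1].index))          # variables above the 0.1 rule of thumb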

def trans_woe(var, var_name, x_woe, x_cut):
    # Replace the raw values of var_name with the WOE of the bin they fall into,
    # writing the result to a new column '<var_name>_woe'.
    woe_name = var_name + '_woe'
    for i in range(len(x_woe)):
        if i == 0:
            var.loc[(var[var_name]<=x_cut[i+1]),woe_name] = x_woe[i]
        elif (i>0) and (i<= len(x_woe)-2):
            var.loc[((var[var_name]>x_cut[i])&(var[var_name]<=x_cut[i+1])),woe_name] = x_woe[i]
        else:
            var.loc[(var[var_name]>x_cut[len(x_woe)-1]),woe_name] = x_woe[len(x_woe)-1]
    return var
 
x1_name = 'RevolvingUtilizationOfUnsecuredLines'
x2_name = 'age'
x3_name = 'NumberOfTime30-59DaysPastDueNotWorse'
x4_name = 'DebtRatio'
x5_name = 'MonthlyIncome'
x6_name = 'NumberOfOpenCreditLinesAndLoans'
x7_name = 'NumberOfTimes90DaysLate'
x8_name = 'NumberRealEstateLoansOrLines'
x9_name = 'NumberOfTime60-89DaysPastDueNotWorse'
x1_cut = [float('-inf'),0.137,0.301,0.501,0.699,0.86,29110.1,float('+inf')]
x2_cut = [float('-inf'),21,36,46,56,63,67,107,float('+inf')]
x3_cut = [float('-inf'),0.5,1.5,13.1,float('+inf')]
x4_cut = [float('-inf'),0.0163,0.423,0.505,0.654,3.973,329664.1,float('+inf')]
x5_cut = [float('-inf'),0,0.5,1.5,13.1,float('+inf')]
x6_cut = [float('-inf'),2.5,4.5,5.5,8.5,14.5,58.1,float('+inf')]
x7_cut = [float('-inf'),0.5,17.1,float('+inf')]
x8_cut = [float('-inf'),0.5,1.5,2.5,54.1,float('+inf')]
x9_cut = [float('-inf'),1265.5,3600.5,3614.5,5300.5,6449.5,3008750.1,float('+inf')]
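As a quick sanity check of trans_woe, a tiny hypothetical frame shows that it simply appends an 'age_woe' column holding the WOE of whichever bin each raw value falls into (a sketch; the three ages are made up):

demo = pd.DataFrame({'age': [25, 50, 80]})       # hypothetical ages
demo = trans_woe(demo, x2_name, x2_woe, x2_cut)  # adds an 'age_woe' column
print(demo)
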
# train_X = trans_woe(train_X,x1_name,x1_woe,x1_cut)
train_X = trans_woe(train_X,x2_name,x2_woe,x2_cut)
# train_X = trans_woe(train_X,x3_name,x3_woe,x3_cut)
train_X = trans_woe(train_X,x4_name,x4_woe,x4_cut)
train_X = trans_woe(train_X,x5_name,x5_woe,x5_cut)
train_X = trans_woe(train_X,x6_name,x6_woe,x6_cut)
train_X = trans_woe(train_X,x7_name,x7_woe,x7_cut)
train_X = trans_woe(train_X,x8_name,x8_woe,x8_cut)
# train_X = trans_woe(train_X,x9_name,x9_woe,x9_cut)

train_X = train_X.iloc[:,-6:]
from sklearn.linear_model import LogisticRegression  #导入logistic回归模块
from sklearn.model_selection import train_test_split #导入数据切分函数

train_X.head()
         age_woe  DebtRatio_woe  MonthlyIncome_woe  NumberOfOpenCreditLinesAndLoans_woe  NumberOfTimes90DaysLate_woe  NumberRealEstateLoansOrLines_woe
3302       0.350          0.058              0.318                                0.155                         0.36                             0.187
112869    -0.102          0.058              0.318                                0.155                         0.36                             0.196
124934     1.047          0.533              0.318                               -0.656                         0.36                            -0.211
14047      0.350          0.058              0.318                               -0.217                         0.36                            -0.211
101221    -0.102          0.058              0.318                                0.155                         0.36                             0.187
import statsmodels.api as sm
X1 = sm.add_constant(train_X)   # add an intercept column
logit = sm.Logit(train_y, X1)
result = logit.fit()
print(result.summary())
Optimization terminated successfully.
         Current function value: 0.215753
         Iterations 8
                           Logit Regression Results                           
==============================================================================
Dep. Variable:       SeriousDlqin2yrs   No. Observations:                96041
Model:                          Logit   Df Residuals:                    96034
Method:                           MLE   Df Model:                            6
Date:                Thu, 08 Jul 2021   Pseudo R-squ.:                  0.1399
Time:                        09:02:25   Log-Likelihood:                -20721.
converged:                       True   LL-Null:                       -24093.
Covariance Type:            nonrobust   LLR p-value:                     0.000
=======================================================================================================
                                          coef    std err          z      P>|z|      [0.025      0.975]
-------------------------------------------------------------------------------------------------------
const                                   3.8840      0.145     26.733      0.000       3.599       4.169
age_woe                                 0.6528      0.030     21.499      0.000       0.593       0.712
DebtRatio_woe                           1.6798      0.074     22.814      0.000       1.535       1.824
MonthlyIncome_woe                      -4.7477      0.459    -10.349      0.000      -5.647      -3.849
NumberOfOpenCreditLinesAndLoans_woe     0.2531      0.065      3.891      0.000       0.126       0.381
NumberOfTimes90DaysLate_woe             0.9489      0.013     73.846      0.000       0.924       0.974
NumberRealEstateLoansOrLines_woe        0.7415      0.072     10.329      0.000       0.601       0.882
=======================================================================================================
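
LogisticRegression was imported earlier but never used; for comparison, the scikit-learn counterpart of the statsmodels fit above could look like this (a sketch; a large C approximates the unpenalized maximum-likelihood fit that statsmodels reports):

lr = LogisticRegression(C=1e9, max_iter=1000)   # effectively no regularization
lr.fit(train_X, train_y)
print(lr.intercept_, lr.coef_)                  # should be close to the Logit coefficients above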
# test_X = trans_woe(test_X,x1_name,x1_woe,x1_cut)
test_X = trans_woe(test_X,x2_name,x2_woe,x2_cut)
# test_X = trans_woe(test_X,x3_name,x3_woe,x3_cut)
test_X = trans_woe(test_X,x4_name,x4_woe,x4_cut)
test_X = trans_woe(test_X,x5_name,x5_woe,x5_cut)
test_X = trans_woe(test_X,x6_name,x6_woe,x6_cut)
test_X = trans_woe(test_X,x7_name,x7_woe,x7_cut)
test_X = trans_woe(test_X,x8_name,x8_woe,x8_cut)
# test_X = trans_woe(test_X,x9_name,x9_woe,x9_cut)
 
test_X = test_X.iloc[:,-6:]
from sklearn import metrics
X3 = sm.add_constant(test_X)
resu = result.predict(X3)
fpr, tpr, threshold = metrics.roc_curve(test_y, resu)
rocauc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % rocauc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('TPR')
plt.xlabel('FPR')
plt.show()

The final ROC curve and AUC are shown below:

(Figure: ROC curve with the AUC value)
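
The scoring step below uses the standard "points to double the odds" scaling. With PDO = 20 points and a base score of 600 at odds of 20:1 (good : bad), an applicant's score is

    score = q + p * ln(odds),  where p = PDO / ln(2) and q = 600 - p * ln(20)

Since the logit model above gives ln(odds) = b0 + b1*woe_1 + ... + b6*woe_6 (the target was flipped so that 1 = good), the intercept contributes a base score of q + p*b0 and each attribute contributes round(b_i * woe * p) points, which is what get_score computes.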

p = 20/np.log(2)                    # scale factor: PDO = 20 points to double the odds
q = 600 - 20*np.log(20)/np.log(2)   # offset: a score of 600 corresponds to odds of 20:1
 
def get_score(coe,woe,factor):
    # Turn each bin's WOE into points: coefficient * WOE * factor, rounded.
    scores=[]
    for w in woe:
        score=round(coe*w*factor,0)
        scores.append(score)
    return scores
 
x_coe = [3.8840,0.6528,1.6798,-4.7477,0.2531,0.9489,0.7415]   # intercept and coefficients from the Logit summary
baseScore = round(q + p * x_coe[0], 0)

print('Base score:', baseScore)
Base score: 626.0
x2_score = get_score(x_coe[1], x2_woe, p)
x4_score = get_score(x_coe[2], x4_woe, p)
x5_score = get_score(x_coe[3], x5_woe, p)
x6_score = get_score(x_coe[4], x6_woe, p)
x7_score = get_score(x_coe[5], x7_woe, p)
x8_score = get_score(x_coe[6], x8_woe, p)
print(x2_score)
[-9.0, -6.0, -2.0, 7.0, 13.0, 20.0]
print (x4_score)
[26.0, 3.0, 12.0, -1.0, -15.0, -24.0]
print(x5_score)
[-10.0, 55.0, 27.0, -2.0, -44.0, -72.0]
print(x6_score)
[-5.0, -2.0, -0.0, 0.0, 1.0, -0.0]
print(x7_score)
[10.0, -62.0]
print(x8_score)
[-5.0, 4.0, 4.0, -5.0]
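
Putting it all together (a sketch): an applicant's total score is the base score plus the points implied by their WOE values. The columns of test_X line up with x_coe[1:] in the order of the Logit summary; for simplicity this sums the unrounded attribute points rather than the per-bin rounded scores printed above.

attr_points = test_X.mul(pd.Series(x_coe[1:], index=test_X.columns), axis=1) * p   # points per attribute
total_score = baseScore + attr_points.sum(axis=1).round(0)                         # final credit score
print(total_score.describe())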