Electricity-Fee Sensitivity Model: Low-Sensitivity Users

Data overview: Using power customers' 95598 work-order data and electricity consumption / billing marketing data, we analyze the characteristics of fee-sensitive customers and build a customer fee-sensitivity model that quantifies how sensitive each customer is to electricity fees. This helps the utility identify fee-sensitive customers quickly and accurately, so it can offer them targeted, fine-grained services such as bill and consumption reminders.

Two models are built, based on the degree of sensitivity:

1. Low-sensitivity model: a low-sensitivity user is defined as one with exactly one 95598 work-order record within the time window.

2. High-sensitivity model: a high-sensitivity user is defined as one with two or more 95598 work-order records within the time window.

Given the information and data available, three tables can be used: the 95598 work-order table, the customer call-record table, and the receivable electricity-fee table.
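To make the split rule concrete, here is a minimal sketch (the toy `orders` DataFrame and its values are hypothetical, purely for illustration) of how per-customer 95598 record counts map to the two groups; the full pipeline below applies the same rule to the real tables.

import pandas as pd

# toy work-order table: one row per 95598 record (fabricated customer numbers)
orders = pd.DataFrame({'CUST_NO': [1001, 1001, 1002, 1003, 1003, 1003]})
counts = orders.CUST_NO.value_counts()            # records per customer
low_sensitivity = counts[counts == 1].index       # exactly one record  -> low-sensitivity model
high_sensitivity = counts[counts >= 2].index      # two or more records -> high-sensitivity model
print(sorted(low_sensitivity), sorted(high_sensitivity))  # [1002] [1001, 1003]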

The implementation code is as follows:

''' Load the required modules '''
import pandas as pd
import numpy as np
import re
import os
import csv
import pickle
import jieba
from numpy import log
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder

''' Read in the datasets '''
data_path = 'E:\电费敏感预测/rawdata/'
file_jobinfo_train = '01_arc_s_95598_wkst_train.tsv' # 95598 work orders, train
file_jobinfo_test = '01_arc_s_95598_wkst_test.tsv' # 95598 work orders, test
file_comm = '02_s_comm_rec.tsv' # call records
file_flow_train = '09_arc_a_rcvbl_flow.tsv' # receivable electricity-fee table, train
file_flow_test = '09_arc_a_rcvbl_flow_test.tsv' # receivable electricity-fee table, test
file_label = 'train_label.csv' # positive examples of the training set
file_test = 'test_to_predict.csv' # test set (customers to predict)

train_info = pd.read_csv(data_path + 'processed_' + file_jobinfo_train, sep='\t', encoding='utf-8', quoting=csv.QUOTE_NONE)
# Filter out rows with an empty CUST_NO
train_info = train_info.loc[~train_info.CUST_NO.isnull()]
train_info['CUST_NO'] = train_info.CUST_NO.astype(np.int64)
train_info.head()

''' Count work-order records per customer: the basis for separating low- and high-sensitivity users '''
train = train_info.CUST_NO.value_counts().to_frame().reset_index()
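# (value_counts().to_frame().reset_index() yields two columns: the customer number and its
#  number of work-order records; they are renamed to ['CUST_NO', 'counts_of_jobinfo'] below.)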
train.head()

''' Build the training and test sets and inspect them '''
train.columns = ['CUST_NO', 'counts_of_jobinfo']
temp = pd.read_csv(data_path + file_label, header=None)
temp.columns = ['CUST_NO']
train['label'] = 0
train.loc[train.CUST_NO.isin(temp.CUST_NO), 'label'] = 1
train = train[['CUST_NO', 'label', 'counts_of_jobinfo']]
test_info = pd.read_csv(data_path + 'processed_' + file_jobinfo_test, sep='\t', encoding='utf-8', quoting=csv.QUOTE_NONE)
test = test_info.CUST_NO.value_counts().to_frame().reset_index()
test.columns = ['CUST_NO', 'counts_of_jobinfo']
test['label'] = -1 # mark test-set rows with label -1 to distinguish them
test = test[['CUST_NO', 'label', 'counts_of_jobinfo']]
df = train.append(test).copy()
del temp, train, test

df = df.loc[df.counts_of_jobinfo == 1].copy() # keep customers with exactly one work-order record (low-sensitivity modeling)
df.reset_index(drop=True, inplace=True)
train = df.loc[df.label != -1]
test = df.loc[df.label == -1]
print('Distribution of low-sensitivity users in the raw data:')
print('training set:', train.shape[0])
print('positive samples:', train.loc[train.label == 1].shape[0])
print('negative samples:', train.loc[train.label == 0].shape[0])
print('-----------------------')
print('test set:', test.shape[0])
df.drop(['counts_of_jobinfo'], axis=1, inplace=True)

''' Process the 95598 work-order table and propagate the train/test labels into the work-order and call-record tables '''
jobinfo = train_info.append(test_info).copy()
jobinfo = jobinfo.loc[jobinfo.CUST_NO.isin(df.CUST_NO)].copy() # drop records for customers not in the train/test sets
jobinfo.reset_index(drop=True, inplace=True)
jobinfo = jobinfo.merge(df[['CUST_NO', 'label']], on='CUST_NO', how='left')

''' Process the call-record table '''
comm = pd.read_csv(data_path + file_comm, sep='\t') # full table shape: (1593088, 8)
comm.drop_duplicates(inplace=True)
comm = comm.loc[comm.APP_NO.isin(jobinfo.ID)] # drop call records that do not appear in jobinfo
comm = comm.rename(columns={'APP_NO':'ID'})
comm = comm.merge(jobinfo[['ID', 'CUST_NO']], on='ID', how='left')
comm['REQ_BEGIN_DATE'] = comm.REQ_BEGIN_DATE.apply(lambda x:pd.to_datetime(x))
comm['REQ_FINISH_DATE'] = comm.REQ_FINISH_DATE.apply(lambda x:pd.to_datetime(x))
comm = comm.loc[~(comm.REQ_BEGIN_DATE > comm.REQ_FINISH_DATE)]
df = df.loc[df.CUST_NO.isin(comm.CUST_NO)].copy()
comm['holding_time'] = comm['REQ_FINISH_DATE'] - comm['REQ_BEGIN_DATE']
comm['holding_time_seconds'] = comm.holding_time.apply(lambda x:x.seconds)
df = df.merge(comm[['CUST_NO', 'holding_time_seconds']], how='left', on='CUST_NO')
df['holding_time_seconds'] = MinMaxScaler().fit_transform(df['holding_time_seconds'].values.reshape(-1, 1))
del comm

''' Data cleaning and feature engineering '''
df['rank_CUST_NO'] = df.CUST_NO.rank(method='max') # rank customer numbers, then scale to [0, 1]
df['rank_CUST_NO'] = MinMaxScaler().fit_transform(df.rank_CUST_NO.values.reshape(-1,1))

jobinfo = jobinfo.loc[jobinfo.CUST_NO.isin(df.CUST_NO)].copy()
jobinfo.reset_index(drop=True,inplace=True)
df = df.merge(jobinfo[['CUST_NO','BUSI_TYPE_CODE']],on='CUST_NO',how='left')
temp = pd.get_dummies(df.BUSI_TYPE_CODE,prefix='onehot_BUSI_TYPE_CODE',dummy_na=True)
df = pd.concat([df,temp],axis=1)
df.drop(['BUSI_TYPE_CODE'],axis=1,inplace=True)
del temp

df = df.merge(jobinfo[['CUST_NO','URBAN_RURAL_FLAG']],on='CUST_NO',how='left')
temp = pd.get_dummies(df.URBAN_RURAL_FLAG,prefix='onehot_URBAN_RURAL_FLAG',dummy_na=True)
df = pd.concat([df,temp],axis=1)
df.drop(['URBAN_RURAL_FLAG'],axis=1,inplace=True)
del temp

# Supply organization code (ORG_NO): len_of_ORG_NO is the code length; ratio_ORG_NO is the share of positive samples among training rows with the same ORG_NO
df = df.merge(jobinfo[['CUST_NO','ORG_NO']],on='CUST_NO',how='left')
df['len_of_ORG_NO'] = df.ORG_NO.apply(lambda x: len(str(x)))
df.fillna(-1,inplace=True)
train = df[df.label != -1]
ratio = {}
for i in train.ORG_NO.unique():
    ratio[i] = len(train.loc[(train.ORG_NO == i) & (train.label == 1)]) / len(train.loc[train.ORG_NO == i])
df['ratio_ORG_NO'] = df.ORG_NO.map(ratio)
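# (A hedged, equivalent alternative to the loop above: since label is 0/1 on the training
#  rows, the per-ORG_NO mean of label is exactly the positive share the loop computes;
#  this line reproduces the same ratio_ORG_NO column in one step.)
df['ratio_ORG_NO'] = df.ORG_NO.map(train.groupby('ORG_NO').label.mean())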
temp = pd.get_dummies(df.len_of_ORG_NO,prefix='onehot_len_of_ORG_NO')
df = pd.concat([df,temp],axis=1)
df.drop(['ORG_NO','len_of_ORG_NO'],axis=1,inplace=True)
# Time-based features from HANDLE_TIME
df = df.merge(jobinfo[['CUST_NO','HANDLE_TIME']],on='CUST_NO',how='left')
df['date'] = df['HANDLE_TIME'].apply(lambda x: pd.to_datetime(x.split()[0]))
df['time'] = df['HANDLE_TIME'].apply(lambda x: x.split()[1])
df['month'] = df['date'].apply(lambda x: x.month)
df['day'] = df['date'].apply(lambda x: x.day)
df['is_in_first_tendays'] = 0
df.loc[df.day.isin(range(1,11)),'is_in_first_tendays'] = 1
df['is_in_middle_tendays'] = 0
df.loc[df.day.isin(range(11,21)),'is_in_middle_tendays'] = 1
df['is_in_last_tendays'] = 0
df.loc[df.day.isin(range(21,32)),'is_in_last_tendays'] = 1
df['hour'] = df.time.apply(lambda x: int(x.split(':')[0]))
df.drop(['HANDLE_TIME','date','time'],axis=1,inplace=True)
# Process ELEC_TYPE (electricity usage type)
df = df.merge(jobinfo[['CUST_NO','ELEC_TYPE']],on='CUST_NO',how='left')
df.fillna(0,inplace=True)
df['head_of_ELEC_TYPE'] = df.ELEC_TYPE.apply(lambda x: str(x)[0])
df['is_ELEC_TYPE_NaN'] = 0
df.loc[df.ELEC_TYPE == 0,'is_ELEC_TYPE_NaN'] = 1
df['label_encoder_ELEC_TYPE'] = LabelEncoder().fit_transform(df['ELEC_TYPE'])
train = df[df.label != -1]
ratio = {}
for i in train.ELEC_TYPE.unique():
    ratio[i] = len(train.loc[(train.ELEC_TYPE == i) & (train.label == 1)]) / len(train.loc[train.ELEC_TYPE == i])
df['ratio_ELEC_TYPE'] = df.ELEC_TYPE.map(ratio)
df.fillna(0,inplace=True)
temp = pd.get_dummies(df.head_of_ELEC_TYPE,prefix='onehot_head_of_ELEC_TYPE')
df = pd.concat([df,temp],axis=1)
df.drop(['head_of_ELEC_TYPE'],axis=1,inplace=True) # drop the raw string column; it would otherwise break the sparse-matrix conversion later
# City organization code
df = df.merge(jobinfo[['CUST_NO','CITY_ORG_NO']],on='CUST_NO',how='left')
train = df[df.label != -1]
ratio = {}
for i in train.CITY_ORG_NO.unique():
    ratio[i] = len(train.loc[(train.CITY_ORG_NO == i) & (train.label == 1)]) / len(train.loc[train.CITY_ORG_NO == i])
df['ratio_CITY_ORG_NO'] = df.CITY_ORG_NO.map(ratio)
temp = pd.get_dummies(df.CITY_ORG_NO,prefix='onehot_CITY_ORG_NO')
df = pd.concat([df,temp],axis=1)
df.drop(['CITY_ORG_NO'],axis=1,inplace=True)
train_flow = pd.read_csv(data_path + file_flow_train,sep='\t')
test_flow = pd.read_csv(data_path + file_flow_test,sep='\t')
flow = train_flow.append(test_flow).copy()
flow.rename(columns={'CONS_NO':'CUST_NO'},inplace=True)
flow.drop_duplicates(inplace=True)
flow = flow.loc[flow.CUST_NO.isin(df.CUST_NO)].copy()

flow['T_PQ'] = flow.T_PQ.apply(lambda x: -x if x<0 else x)
flow['RCVBL_AMT'] = flow.RCVBL_AMT.apply(lambda x: -x if x<0 else x)
flow['RCVED_AMT'] = flow.RCVED_AMT.apply(lambda x: -x if x<0 else x)
flow['OWE_AMT'] = flow.OWE_AMT.apply(lambda x: -x if x<0 else x)
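# (has_biao9 flags whether the customer has any record in table 09, the receivable
#  electricity-fee table; counts_of_09flow is that customer's number of billing records.)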
df['has_biao9'] = 0
df.loc[df.CUST_NO.isin(flow.CUST_NO),'has_biao9'] = 1
df['counts_of_09flow'] = df.CUST_NO.map(flow.groupby('CUST_NO').size())

# Build statistical features
# Receivable amount (RCVBL_AMT)
df['sum_yingshoujine'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_AMT.sum()) + 1)
df['mean_yingshoujine'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_AMT.mean()) + 1)
df['max_yingshoujine'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_AMT.max()) + 1)
df['min_yingshoujine'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_AMT.min()) + 1)
df['std_yingshoujine'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_AMT.std()) + 1)

# Received (paid) amount (RCVED_AMT)
df['sum_shishoujine'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVED_AMT.sum()) + 1)
# Arrears: receivable minus received
df['qianfei'] = df['sum_yingshoujine'] - df['sum_shishoujine']

# Electricity consumption (T_PQ)
df['sum_T_PQ'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').T_PQ.sum()) + 1)
df['mean_T_PQ'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').T_PQ.mean()) + 1)
df['max_T_PQ'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').T_PQ.max()) + 1)
df['min_T_PQ'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').T_PQ.min()) + 1)
df['std_T_PQ'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').T_PQ.std()) + 1)

# Owed fee amount (OWE_AMT)
df['sum_OWE_AMT'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').OWE_AMT.sum()) + 1)
df['mean_OWE_AMT'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').OWE_AMT.mean()) + 1)
df['max_OWE_AMT'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').OWE_AMT.max()) + 1)
df['min_OWE_AMT'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').OWE_AMT.min()) + 1)
df['std_OWE_AMT'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').OWE_AMT.std()) + 1)

# Difference between the owed fee amount and the receivable amount
df['dianfei_jian_yingshoujine'] = df['sum_OWE_AMT'] - df['sum_yingshoujine']

# Receivable late-payment penalty (RCVBL_PENALTY)
df['sum_RCVBL_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_PENALTY.sum()) + 1)
df['mean_RCVBL_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_PENALTY.mean()) + 1)
df['max_RCVBL_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_PENALTY.max()) + 1)
df['min_RCVBL_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_PENALTY.min()) + 1)
df['std_RCVBL_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_PENALTY.std()) + 1)

# Received late-payment penalty (RCVED_PENALTY)
df['sum_RCVED_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVED_PENALTY.sum()) + 1)
df['mean_RCVED_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVED_PENALTY.mean()) + 1)
df['max_RCVED_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVED_PENALTY.max()) + 1)
df['min_RCVED_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVED_PENALTY.min()) + 1)
df['std_RCVED_PENALTY'] = log(df.CUST_NO.map(flow.groupby('CUST_NO').RCVED_PENALTY.std()) + 1)

df['chaduoshao_weiyuejin'] = df['sum_RCVBL_PENALTY'] - df['sum_RCVED_PENALTY']
# Number of billing records per customer and average number of records per month
df['nunique_RCVBL_YM'] = df.CUST_NO.map(flow.groupby('CUST_NO').RCVBL_YM.nunique())
df['mean_RCVBL_YM'] = df['counts_of_09flow'] / df['nunique_RCVBL_YM']
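# (Hedged alternative: the repeated map(groupby(...)) calls above could be collapsed into a
#  single groupby/agg pass per column; for example, the five RCVBL_AMT statistics at once:)
rcvbl_stats = flow.groupby('CUST_NO').RCVBL_AMT.agg(['sum', 'mean', 'max', 'min', 'std'])
# df['sum_yingshoujine'] is then log(df.CUST_NO.map(rcvbl_stats['sum']) + 1), and so on.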
del train_flow,test_flow,flow

# Save the statistical features
if not os.path.isdir('../myfeatures'):
    os.makedirs('../myfeatures')
pickle.dump(df,open('../myfeatures/statistical_features_1.pkl','wb'))
''' Process the text features in the work-order table '''
mywords = ['户号','分时','抄表','抄表示数','工单','单号','工单号','空气开关','脉冲灯','计量表','来电','报修']
for word in mywords:
    jieba.add_word(word)

stops = set()
with open('E:\电费敏感预测\stopwords.txt',encoding='utf-8') as f:
    for word in f:
        word = word.strip()
        stops.add(word)

def fenci(line):
    # cut the sentence with jieba and drop stopwords, keeping tokens space-separated
    res = []
    words = jieba.cut(line)
    for word in words:
        if word not in stops:
            res.append(word)
    return ' '.join(res)
jobinfo['contents'] = jobinfo.ACCEPT_CONTENT.apply(lambda x: fenci(x))

# Replace phone numbers, customer numbers and similar digit strings, which carry little useful information, with generic tokens
def hash_number(x):
    shouji_pattern = re.compile(r'\s1\d{10}\s|\s1\d{10}\Z')   # mobile phone numbers
    if shouji_pattern.findall(x):
        x = re.sub(shouji_pattern, ' 手机number ', x)

    huhao_pattern = re.compile(r'\s\d{10}\s|\s\d{10}\Z')      # customer (account) numbers
    if huhao_pattern.findall(x):
        x = re.sub(huhao_pattern, ' 户号number ', x)

    tuiding_pattern = re.compile(r'\s\d{11}\s|\s\d{11}\Z')    # unsubscribe numbers
    if tuiding_pattern.findall(x):
        x = re.sub(tuiding_pattern, ' 退订number ', x)

    gongdan_pattern = re.compile(r'\s201\d{13}\s|\s201\d{13}\Z')  # work-order numbers
    if gongdan_pattern.findall(x):
        x = re.sub(gongdan_pattern, ' 工单number ', x)

    tingdian_pattern = re.compile(r'\s\d{12}\s|\s\d{12}\Z')   # outage notification numbers
    if tingdian_pattern.findall(x):
        x = re.sub(tingdian_pattern, ' 停电number ', x)

    return x.strip()
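# Quick sanity check of the replacement rules on a fabricated, space-separated string
# (all numbers below are made up purely for illustration):
print(hash_number('用户 13812345678 反映 户号 1234567890 电费 偏高'))
# -> '用户 手机number 反映 户号 户号number 电费 偏高'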

jobinfo['content'] = jobinfo['contents'].apply(lambda x: hash_number(x))
jobinfo['len_of_contents'] = jobinfo.content.apply(lambda x: len(x.split()))
jobinfo['counts_of_words'] = jobinfo.content.apply(lambda x: len(set(x.split())))
text = df[['CUST_NO']].copy()
text = text.merge(jobinfo[['CUST_NO','len_of_contents','counts_of_words','content']],on='CUST_NO',how='left')
text = text.rename(columns={'content':'contents'})
pickle.dump(text,open('../myfeatures/text_features_1.pkl','wb')) 
''' Load the saved features and data sets, then run feature selection '''
import pickle
from scipy.sparse import csc_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import xgboost as xgb

df = pickle.load(open('../myfeatures/statistical_features_1.pkl','rb'))
text = pickle.load(open('../myfeatures/text_features_1.pkl','rb'))
df = df.merge(text,on='CUST_NO',how='left')

train = df.loc[df.label != -1]
test = df.loc[df.label == -1]
x_data = train.copy()
x_val = test.copy()
x_data = x_data.sample(frac=1,random_state=1).reset_index(drop=True)

delete_columns = ['CUST_NO','label','contents']
X_train_1 = csc_matrix(x_data.drop(delete_columns,axis=1).values)
X_val_1 = csc_matrix(x_val.drop(delete_columns,axis=1).values)
y_train = x_data.label.values
y_val = x_val.label.values

# Build TF-IDF features
featurenames = list(x_data.drop(delete_columns,axis=1).columns)
print('creating tfidf...')
tfidf = TfidfVectorizer(ngram_range=(1,2),min_df=3,use_idf=False,smooth_idf=False,sublinear_tf=True)
tfidf.fit(x_data.contents)
word_names = tfidf.get_feature_names()
X_train_2 = tfidf.transform(x_data.contents)
X_val_2 = tfidf.transform(x_val.contents)
statistic_feature = featurenames.copy()
featurenames.extend(word_names)
from scipy.sparse import hstack
X_train = hstack(((X_train_1),(X_train_2))).tocsc()
X_val = hstack(((X_val_1),(X_val_2))).tocsc()

# Use xgboost feature importance to select text features and reduce dimensionality
dtrain = xgb.DMatrix(X_train,y_train,feature_names=featurenames)
dval = xgb.DMatrix(X_val,feature_names=featurenames)
params = {"objective":"binary:logistic","booster":"gbtree","eval_metric":"error",
"eta":0.1,"max_depth":12,"subsample":0.8,"min_child_weight":3,"colsample_bytree":1,
"gamma":0.2,"lambda":300,"silent":1,"seed":1}

watchlist = [(dtrain,'train')]
model = xgb.train(params,dtrain,2500,evals=watchlist,early_stopping_rounds=100,verbose_eval=100)
print("训练完毕!")

temp = pd.DataFrame.from_dict(model.get_fscore(),orient='index').reset_index()
temp.columns = ['feature','score']
temp.sort_values(['score'],axis=0,ascending=False,inplace=True)
temp.reset_index(drop=True,inplace=True)
print("留下文本特征数量:",len(temp.loc[~temp.feature.isin(statistic_feature)]))
selected_words = list(temp.loc[~temp.feature.isin(statistic_feature)].feature.values)
pickle.dump(selected_words,open('../myfeatures/single_select_words.pkl','wb'))
''' Build the final model on the selected features '''
df = pickle.load(open('../myfeatures/statistical_features_1.pkl','rb'))
text = pickle.load(open('../myfeatures/text_features_1.pkl','rb'))
df = df.merge(text,on='CUST_NO',how='left')
train = df.loc[df.label != -1]
test = df.loc[df.label == -1]

x_data = train.copy()
x_val = test.copy()
x_data = x_data.sample(frac=1,random_state=1).reset_index(drop=True)
delete_columns = ['CUST_NO','label','contents']
X_train_1 = csc_matrix(x_data.drop(delete_columns,axis=1).values)
X_val_1 = csc_matrix(x_val.drop(delete_columns,axis=1).values)
y_train = x_data.label.values
y_val = x_val.label.values
featurenames = list(x_data.drop(delete_columns,axis=1).columns)
print('tfidf...')
select_words = pickle.load(open('../myfeatures/single_select_words.pkl','rb'))
tfidf = TfidfVectorizer(ngram_range=(1,2),min_df=3,use_idf=False,smooth_idf=False,sublinear_tf=True,vocabulary=select_words)
tfidf.fit(x_data.contents)
word_names = tfidf.get_feature_names()
X_train_2 = tfidf.transform(x_data.contents)
X_val_2 = tfidf.transform(x_val.contents)
statistic_feature = featurenames.copy()
featurenames.extend(word_names)
from scipy.sparse import hstack
X_train = hstack(((X_train_1),(X_train_2))).tocsc()
X_val = hstack(((X_val_1),(X_val_2))).tocsc()

print(" start 3 xgboost...")
bagging = []
for i in range(1,4):
    print("group:",i)
    dtrain = xgb.DMatrix(X_train,y_train,feature_names = featurenames)
    dval = xgb.DMatrix(X_val,feature_names = featurenames)
    params = {
        "objective": "binary:logistic",
        "booster": "gbtree",
        "eval_metric": "error",
        "eta": 0.1,
        'max_depth':14,
        'subsample':0.8,
        'min_child_weight':2,
        'colsample_bytree':1,
        'gamma':0.2,
        "lambda":300,
        'silent':1,
        "seed":i,
    }
    watchlist = [(dtrain,'train')]
    model = xgb.train(params,dtrain,2000,evals=watchlist,early_stopping_rounds=50,verbose_eval=100)
    print("predicting...")
    y_prob = model.predict(dval,ntree_limit=model.best_ntree_limit)
    bagging.append(y_prob)
    

print("voting...")

def threshold(y,t):
    z = np.copy(y)
    z[z>=t] = 1
    z[z<t] = 0
    return z

t = 0.5
pres = []
for i in bagging:
    pres.append(threshold(i,t))

pres = np.array(pres).T.astype('int64')
result = []
for line in pres:
    result.append(np.bincount(line).argmax())
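# (Illustration of the vote: if the three thresholded predictions for a sample are [1, 0, 1],
#  np.bincount([1, 0, 1]) gives [1, 2] -- one vote for 0, two for 1 -- so argmax() returns 1.)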

myout = test[['CUST_NO']].copy()
myout['pre'] = result
print("output...")
if not os.path.isdir('../result'):
    os.makedirs('../result')
myout.loc[myout.pre == 1,'CUST_NO'].to_csv('../result/A.csv',index=False)

This article draws on the NetEase Cloud Classroom (网易云课堂) "Data Analyst" micro-degree course (《微专业数据分析师》).

 
