AI Competition - Recommender System (Part 1) - News Recommendation 02: Training Item Word Vectors [one vector per news article] [word2vec: feed each user's clicked-article sequence as input (analogous to a text sequence) and learn a vector representation for every article] [Tianchi]

Install gensim and use its Word2Vec model.

 

# Install gensim (this notebook uses the gensim 4.x API: vector_size / epochs)
!pip install gensim
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from gensim.models import Word2Vec
import logging

I. Read the raw data

1. Define the data paths

path = './data_raw/'
save_path = './temp_results/'

2. Read the training click log

# training click log
trn_click = pd.read_csv(path + 'train_click_log.csv')
trn_click = trn_click.sort_values('user_id')
trn_click
[output] trn_click: 1112623 rows × 9 columns (user_id, click_article_id, click_timestamp, click_environment, click_deviceGroup, click_os, click_country, click_region, click_referrer_type)

3. Read the test click log

# test click log
tst_click = pd.read_csv(path + 'testA_click_log.csv')
tst_click = tst_click.sort_values('user_id')
tst_click
[output] tst_click: 518010 rows × 9 columns (same columns as trn_click)

4. Read the article metadata

# article metadata
item_df = pd.read_csv(path + 'articles.csv')
item_df = item_df.sort_values('article_id')
item_df
[output] item_df: 364047 rows × 4 columns (article_id, category_id, created_at_ts, words_count)

II. Data preprocessing

1. Rename the item_df key column to match trn_click / tst_click for the later merge

# rename so the key column matches trn_click / tst_click for the merge below
item_df = item_df.rename(columns={'article_id': 'click_article_id'})  
item_df
[output] item_df: 364047 rows × 4 columns (click_article_id, category_id, created_at_ts, words_count)

2. Count each user's clicks and add it as a new click_cnts column

# per-user click count, added as a new click_cnts column
trn_click['click_cnts'] = trn_click.groupby(['user_id'])['click_timestamp'].transform('count')
trn_click
[output] trn_click: 1112623 rows × 10 columns (the 9 columns above plus click_cnts)

tst_click['click_cnts'] = tst_click.groupby(['user_id'])['click_timestamp'].transform('count')
tst_click
[output] tst_click: 518010 rows × 10 columns (the 9 columns above plus click_cnts)

3. Merge trn_click and tst_click with item_df

trn_click = trn_click.merge(item_df, how='left', on=['click_article_id'])   # left-join trn_click with item_df on click_article_id
trn_click
[output] trn_click: 1112623 rows × 13 columns (the 10 columns above plus category_id, created_at_ts, words_count)

tst_click = tst_click.merge(item_df, how='left', on=['click_article_id'])   # left-join tst_click with item_df on click_article_id
tst_click
[output] tst_click: 518010 rows × 13 columns (the 10 columns above plus category_id, created_at_ts, words_count)

4. Concatenate the training and test sets

# concatenate the training and test click logs (DataFrame.append was removed in pandas 2.0)
all_click = pd.concat([trn_click, tst_click])
all_click
[output] all_click: 1630633 rows × 13 columns

5. Normalize the timestamps

# scale both timestamp columns to [0, 1] for easier visualization
mm = MinMaxScaler()
all_click['click_timestamp'] = mm.fit_transform(all_click[['click_timestamp']])
all_click['created_at_ts'] = mm.fit_transform(all_click[['created_at_ts']])
all_click
[output] all_click: 1630633 rows × 13 columns, with click_timestamp and created_at_ts now scaled to [0, 1]

6. Sort the merged click log by click_timestamp

all_click = all_click.sort_values('click_timestamp')
all_click
[output] all_click: 1630633 rows × 13 columns, sorted by click_timestamp

III. Train an embedding for each article

1. Cast the click_article_id column to string

gensim's Word2Vec expects its input tokens to be strings.

all_click.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1630633 entries, 0 to 518009
Data columns (total 13 columns):
 #   Column               Non-Null Count    Dtype  
---  ------               --------------    -----  
 0   user_id              1630633 non-null  int64  
 1   click_article_id     1630633 non-null  int64  
 2   click_timestamp      1630633 non-null  float64
 3   click_environment    1630633 non-null  int64  
 4   click_deviceGroup    1630633 non-null  int64  
 5   click_os             1630633 non-null  int64  
 6   click_country        1630633 non-null  int64  
 7   click_region         1630633 non-null  int64  
 8   click_referrer_type  1630633 non-null  int64  
 9   click_cnts           1630633 non-null  int64  
 10  category_id          1630633 non-null  int64  
 11  created_at_ts        1630633 non-null  float64
 12  words_count          1630633 non-null  int64  
dtypes: float64(2), int64(11)
memory usage: 174.2 MB
# Word2Vec treats each article id as a token, so the ids must be strings
all_click['click_article_id'] = all_click['click_article_id'].astype(str)
all_click.info()
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1630633 entries, 0 to 518009
Data columns (total 13 columns):
 #   Column               Non-Null Count    Dtype  
---  ------               --------------    -----  
 0   user_id              1630633 non-null  int64  
 1   click_article_id     1630633 non-null  object 
 2   click_timestamp      1630633 non-null  float64
 3   click_environment    1630633 non-null  int64  
 4   click_deviceGroup    1630633 non-null  int64  
 5   click_os             1630633 non-null  int64  
 6   click_country        1630633 non-null  int64  
 7   click_region         1630633 non-null  int64  
 8   click_referrer_type  1630633 non-null  int64  
 9   click_cnts           1630633 non-null  int64  
 10  category_id          1630633 non-null  int64  
 11  created_at_ts        1630633 non-null  float64
 12  words_count          1630633 non-null  int64  
dtypes: float64(2), int64(10), object(1)
memory usage: 174.2+ MB

2. Turn each user's click sequence into a "sentence"

# group each user's clicks into an ordered list, i.e. one "sentence" per user
docs = all_click.groupby(['user_id'])['click_article_id'].apply(lambda x: list(x)).reset_index()
docs
        user_id                                   click_article_id
0             0                                    [30760, 157507]
1             1                                    [289197, 63746]
2             2                                    [36162, 168401]
3             3                                     [50644, 36162]
4             4                                     [42567, 39894]
...         ...                                                ...
249995   249995  [300470, 16129, 160974, 182394, 198659, 272143...
249996   249996                                           [160974]
249997   249997  [183665, 181686, 123909, 74719, 124667, 124337...
249998   249998          [160974, 202557, 237524, 236207, 235105]
249999   249999  [160974, 160417, 162338, 313431, 233717, 21480...

250000 rows × 2 columns

docs = docs['click_article_id'].values.tolist()
docs
[['30760', '157507'],
 ['289197', '63746'],
 ['36162', '168401'],
 ['50644', '36162'],
 ['42567', '39894'],
 ['211442', '234481'],
 ['62464', '10023'],
 ['50644', '211442'],
 ['70986', '50644'],
 ['70986', '211442', '211455'],
 ['50644', '159195'],
 ['50644', '234481'],
 ['211442', '211455'],
 ['36162', '277107'],
 ['70986', '36162'],
 ['277107', '342473', '206415'],
 ['50644', '211442'],
 ['156279', '158331', '363916'],
 ['70986', '224730'],
 ['70986', '205824'],
 ['285433', '285300'],
 ['205958', '70758'],
 ['107014', '107190'],
 ['309535', '309311'],
 ['211442', '156279'],
...
 ['234481', '30760'],
 ['209122', '234308'],
 ['209122', '70986', '284470', '277712'],
 ['70986', '122152'],
 ...]

3. Train the Word2Vec model

# set up logging so training progress is visible
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.INFO)
# These hyperparameters strongly affect the resulting vectors; negative sampling defaults to 5.
# To save time the model is trained for a single epoch, and vector_size=16 keeps the vectors easy
# to inspect (vector_size=100 is a more typical choice). sg=1 selects skip-gram; a fixed seed with
# workers=1 makes the run reproducible.
w2v = Word2Vec(sentences=docs, sg=1, window=5, seed=2020, vector_size=16, workers=1, min_count=1, epochs=1)
# store the embeddings as a dict keyed by article id
item_w2v_emb_dict = {k: w2v.wv[k] for k in all_click['click_article_id']}
2023-01-29 01:59:03,467:INFO:collecting all words and their counts
2023-01-29 01:59:03,468:INFO:PROGRESS: at sentence #0, processed 0 words, keeping 0 word types
2023-01-29 01:59:03,475:INFO:PROGRESS: at sentence #10000, processed 25727 words, keeping 3473 word types
2023-01-29 01:59:03,483:INFO:PROGRESS: at sentence #20000, processed 53883 words, keeping 5811 word types
2023-01-29 01:59:03,492:INFO:PROGRESS: at sentence #30000, processed 84881 words, keeping 7676 word types
2023-01-29 01:59:03,501:INFO:PROGRESS: at sentence #40000, processed 118390 words, keeping 9297 word types
2023-01-29 01:59:03,511:INFO:PROGRESS: at sentence #50000, processed 154179 words, keeping 10844 word types
2023-01-29 01:59:03,523:INFO:PROGRESS: at sentence #60000, processed 192350 words, keeping 12357 word types
2023-01-29 01:59:03,536:INFO:PROGRESS: at sentence #70000, processed 233685 words, keeping 13473 word types
2023-01-29 01:59:03,551:INFO:PROGRESS: at sentence #80000, processed 281335 words, keeping 14939 word types
2023-01-29 01:59:03,565:INFO:PROGRESS: at sentence #90000, processed 329973 words, keeping 16420 word types
2023-01-29 01:59:03,578:INFO:PROGRESS: at sentence #100000, processed 379428 words, keeping 17904 word types
2023-01-29 01:59:03,593:INFO:PROGRESS: at sentence #110000, processed 431464 words, keeping 18928 word types
2023-01-29 01:59:03,611:INFO:PROGRESS: at sentence #120000, processed 489655 words, keeping 20157 word types
2023-01-29 01:59:03,629:INFO:PROGRESS: at sentence #130000, processed 550375 words, keeping 21588 word types
2023-01-29 01:59:03,649:INFO:PROGRESS: at sentence #140000, processed 613031 words, keeping 22923 word types
2023-01-29 01:59:03,669:INFO:PROGRESS: at sentence #150000, processed 678645 words, keeping 24209 word types
2023-01-29 01:59:03,691:INFO:PROGRESS: at sentence #160000, processed 749559 words, keeping 25743 word types
2023-01-29 01:59:03,714:INFO:PROGRESS: at sentence #170000, processed 831064 words, keeping 27232 word types
2023-01-29 01:59:03,738:INFO:PROGRESS: at sentence #180000, processed 914233 words, keeping 28612 word types
2023-01-29 01:59:03,766:INFO:PROGRESS: at sentence #190000, processed 1004976 words, keeping 29699 word types
2023-01-29 01:59:03,800:INFO:PROGRESS: at sentence #200000, processed 1112623 words, keeping 31116 word types
2023-01-29 01:59:03,824:INFO:PROGRESS: at sentence #210000, processed 1200577 words, keeping 31798 word types
2023-01-29 01:59:03,850:INFO:PROGRESS: at sentence #220000, processed 1285942 words, keeping 32381 word types
2023-01-29 01:59:03,878:INFO:PROGRESS: at sentence #230000, processed 1380836 words, keeping 33131 word types
...
2023-01-29 01:59:08,532:INFO:EPOCH 0 - PROGRESS: at 94.73% examples, 317616 words/s, in_qsize 1, out_qsize 0
2023-01-29 01:59:09,130:INFO:EPOCH 0: training on 1630633 raw words (1453015 effective words) took 4.7s, 309765 effective words/s
2023-01-29 01:59:09,131:INFO:Word2Vec lifecycle event {'msg': 'training on 1630633 raw words (1453015 effective words) took 4.7s, 309335 effective words/s', 'datetime': '2023-01-29T01:59:09.131480', 'gensim': '4.3.0', 'python': '3.9.15 | packaged by conda-forge | (main, Nov 22 2022, 15:55:03) \n[GCC 10.4.0]', 'platform': 'Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.35', 'event': 'train'}
2023-01-29 01:59:09,132:INFO:Word2Vec lifecycle event {'params': 'Word2Vec<vocab=35380, vector_size=16, alpha=0.025>', 'datetime': '2023-01-29T01:59:09.132038', 'gensim': '4.3.0', 'python': '3.9.15 | packaged by conda-forge | (main, Nov 22 2022, 15:55:03) \n[GCC 10.4.0]', 'platform': 'Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.35', 'event': 'created'}
item_w2v_emb_dict
{'162300': array([-0.37774816,  1.3504976 , -0.23309161,  0.26072636, -0.23989849,
         0.5004345 , -0.56577134, -0.5481367 ,  0.2039964 ,  0.8342705 ,
        -0.48475933, -0.52761525,  0.20438878,  1.1823852 , -0.4367902 ,
         0.5961195 ], dtype=float32),
 '160974': array([-0.6204943 ,  1.9116834 , -0.46083373,  0.40015092, -0.1149492 ,
         0.16426347, -0.79475456, -0.5675412 , -0.11137734,  0.8231001 ,
        -0.40696675, -0.41709152,  0.46059853,  1.4314909 , -0.43337965,
         0.6168017 ], dtype=float32),
 '158082': array([-0.31103644,  1.366509  , -0.15305053,  0.19239815, -0.22246826,
         0.5307462 , -0.6013954 , -0.5462901 ,  0.15414776,  0.7907959 ,
        -0.47677982, -0.5615761 ,  0.13641724,  1.0848166 , -0.49600208,
         0.577961  ], dtype=float32),
 '158536': array([-0.8572138 ,  2.3342323 , -0.820582  ,  0.59009516, -0.11867882,
        -0.00443581, -0.90663487, -0.48073447, -0.21962236,  0.72721934,
        -0.5553819 , -0.38393563,  0.44512793,  1.4385353 , -0.60131   ,
         0.8454735 ], dtype=float32),
 '300470': array([-3.0404758e-01,  1.6421673e+00, -3.3394170e-01,  1.7405626e-01,
        -3.1160885e-01,  4.3005905e-01, -6.1486483e-01, -5.4003429e-01,
         7.3214585e-04,  8.6553878e-01, -3.9799103e-01, -4.2683634e-01,
         4.4759423e-01,  1.5222788e+00, -4.0337685e-01,  5.7117921e-01],
       dtype=float32),
 '59758': array([-0.525708  ,  1.5093073 , -0.56673056,  0.20997894, -0.18883261,
         0.15363155, -0.53620964, -0.47269717, -0.02236754,  0.80978376,
        -0.3462906 , -0.26018006,  0.27204517,  1.4290353 , -0.47859445,
         0.36089382], dtype=float32),
...
 '63596': array([ 0.01696501,  0.02612317,  0.00530556,  0.03226124,  0.01429469,
        -0.04120407, -0.01667616, -0.03267057, -0.00297171, -0.05766348,
        -0.00219459, -0.02344081,  0.00532304,  0.0203535 , -0.03306687,
         0.01739437], dtype=float32),
 ...}
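
A quick sanity check on the trained vectors is to query an article's nearest neighbours in the embedding space. This is a minimal sketch using gensim's most_similar; the id '160974' is simply one that appears in the outputs above, and any id in the vocabulary works:

# nearest neighbours of one article by cosine similarity in the embedding space
# '160974' is just an example id taken from the outputs above
for article_id, sim in w2v.wv.most_similar('160974', topn=5):
    print(article_id, round(sim, 4))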

4. Convert the embedding dict to a DataFrame

# convert the embedding dict to a DataFrame
articles_embedding_list = []
for article_id, embedding in item_w2v_emb_dict.items():
    embedding = embedding.tolist()
    articles_embedding_list.append([article_id] + embedding)
articles_embedding_df = pd.DataFrame(articles_embedding_list, columns=['article_id'] + ['emb_' + str(i) for i in range(16)])
articles_embedding_df
[output] articles_embedding_df: 35380 rows × 17 columns (article_id plus emb_0 … emb_15)

5. Save the Word2Vec embedding of each article

articles_embedding_df.to_csv(save_path + 'articles_emb.csv', index=False, header=True)  # save_path already ends with '/'
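
If a later step needs these vectors back, they can be reloaded straight from the CSV. A minimal sketch (emb_df, emb_cols and emb_dict are illustrative names, not part of the original pipeline):

# reload the saved embeddings and rebuild an id -> vector lookup
# article_id is read back as int64, so cast to str to match the training keys
emb_df = pd.read_csv(save_path + 'articles_emb.csv')
emb_cols = ['emb_' + str(i) for i in range(16)]
emb_dict = dict(zip(emb_df['article_id'].astype(str), emb_df[emb_cols].values))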

IV. Visualize the trained article vectors

# cosine similarity between consecutive clicks of one user
def get_item_sim_list(df):
    sim_list = []
    item_list = df['click_article_id'].values
    for i in range(0, len(item_list)-1):
        emb1 = item_w2v_emb_dict[str(item_list[i])]   # Word2Vec was trained on str ids
        emb2 = item_w2v_emb_dict[str(item_list[i+1])]
        sim_list.append(np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2)))
    sim_list.append(0)  # pad so the list has one entry per click

    return sim_list

# sample 5 random users and inspect the similarity between their consecutive clicks
sub_user_ids = np.random.choice(all_click.user_id.unique(), size=5, replace=False)
sub_user_info = all_click[all_click['user_id'].isin(sub_user_ids)]
for _, user_df in sub_user_info.groupby('user_id'):
    item_sim_list = get_item_sim_list(user_df)
    print("item_sim_list = ", item_sim_list)
    plt.plot(item_sim_list)
item_sim_list =  [0.92883706, 0]
item_sim_list =  [0.9124602, 0]
item_sim_list =  [0.9199342, 0.96343744, 0]
item_sim_list =  [0.9781094, 0.96341544, 0.97788256, 0.9595503, 0.8217939, 0.72416514, 0.9395525, 0.7885189, 0.9457342, 0.88077354, 0.97478765, 0.9660947, 0.98741436, 0.741313, 0.87396616, 0.929134, 0]
item_sim_list =  [0.96066815, 0.9530401, 0.6379823, 0.9544767, 0.97723705, 0.9679809, 0.9853506, 0.7811122, 0.75035423, 0]
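
Downstream recall steps typically consume the dict form rather than the CSV. Below is a sketch that pickles item_w2v_emb_dict; the file name is an assumption, not from the original pipeline:

import pickle

# persist the id -> vector dict for downstream recall steps
# the file name 'item_w2v_emb.pkl' is an assumption, not from the original pipeline
with open(save_path + 'item_w2v_emb.pkl', 'wb') as f:
    pickle.dump(item_w2v_emb_dict, f)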
