Notes

Download the data:


import wget, tarfile
chusai_f = 'https://tesla-ap-shanghai-1256322946.cos.ap-shanghai.myqcloud.com/cephfs/tesla_common/deeplearning/dataset/algo_contest/train_preliminary.zip'
test_f = 'https://tesla-ap-shanghai-1256322946.cos.ap-shanghai.myqcloud.com/cephfs/tesla_common/deeplearning/dataset/algo_contest/test.zip'
fusai_f = 'https://tesla-ap-guangzhou-1256322946.cos.ap-guangzhou.myqcloud.com/cephfs/tesla_common/deeplearning/dataset/algo_contest/train_semi_final.zip'
filename = wget.download(fusai_f)
print(filename)
import zipfile
zFile = zipfile.ZipFile(filename, "r")
for fileM in zFile.namelist(): 
    zFile.extract(fileM, "./")
    print(fileM)
zFile.close()
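
Only the semi-final archive (fusai_f) is downloaded above; a small sketch for fetching and unpacking all three archives in one loop, reusing the URLs already defined:

import wget, zipfile

for url in [chusai_f, test_f, fusai_f]:  # the three URLs defined above
    fname = wget.download(url)           # saved into the current directory
    print('\ndownloaded:', fname)
    with zipfile.ZipFile(fname, 'r') as zf:
        zf.extractall('./')
        print('extracted', len(zf.namelist()), 'files')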

Utilities

import os
import _pickle as pickle


def save_pickle(data, filepath):
    with open(filepath, 'wb') as fw:
        pickle.dump(data, fw ,protocol=4)

def load_pickle(filepath):
    with open(filepath, 'rb') as fr:
        data = pickle.load(fr)
    return data

def checkout_dir(dir_path, do_delete=False):
    import shutil
    if do_delete and os.path.exists(dir_path):
        shutil.rmtree(dir_path)
    if not os.path.exists(dir_path):
        print(dir_path, 'make dir ok')
        os.makedirs(dir_path)
    else:
        print(dir_path, 'already exists')

class Load_Corpus_with_Iteration(object):  # read the corpus with an iterator
    def __init__(self, path):
        self.path = path

    def __iter__(self):
        for line in open(self.path, ):
            yield line.strip().split(',')

if __name__ == '__main__':
    checkout_dir(os.path.join('../', 'zdq', 'zyx'))
    # a = [1,2,3]
    # file_path = 'a.pickle'
    # save_pickle(a, file_path)
    #
    # b = load_pickle(file_path)
    # print(b)

from collections import OrderedDict, namedtuple, defaultdict
from itertools import chain

from tensorflow.python import keras
import tensorflow as tf
print('=======================================')
print(tf.__version__)
print('=======================================')

gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    print(gpu)
    tf.config.experimental.set_memory_growth(gpu, True)

DEFAULT_GROUP_NAME = "default_group"


def create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns, init_std, seed, l2_reg,
                          prefix='sparse_', seq_mask_zero=True, init_embed_dic={}):
    sparse_embedding = OrderedDict()
    if varlen_sparse_feature_columns and len(varlen_sparse_feature_columns) > 0:
        for feat in varlen_sparse_feature_columns:
            # if feat.name not in sparse_embedding:
            trainable = False if feat.embedding_name in ['ad_id', 'advertiser_id', ] else True
            trainable = False  # NOTE: overrides the line above; all sequence embedding tables are frozen
            if feat.embedding_name in init_embed_dic:
                init_emb = init_embed_dic.get(feat.embedding_name)
                sparse_embedding[feat.embedding_name] = keras.layers.Embedding(feat.vocabulary_size, feat.embedding_dim,
                                                                  weights=[init_emb],
                                                                  trainable=trainable,
                                                                  embeddings_regularizer=keras.regularizers.l2(l2_reg),
                                                                  name=prefix + '_seq_emb_' + feat.name,
                                                                  mask_zero=seq_mask_zero)
            else:
                init_emb = keras.initializers.glorot_uniform()
                sparse_embedding[feat.embedding_name] = keras.layers.Embedding(feat.vocabulary_size, feat.embedding_dim,
                                                                  embeddings_initializer=init_emb,
                                                                  trainable=trainable,
                                                                  embeddings_regularizer=keras.regularizers.l2(l2_reg),
                                                                  name=prefix + '_seq_emb_' + feat.name,
                                                                  mask_zero=seq_mask_zero)
    return sparse_embedding


class SparseFeat(namedtuple('SparseFeat',
                            ['name', 'vocabulary_size', 'embedding_dim', 'use_hash', 'dtype', 'embedding_name',
                             'group_name'])):
    __slots__ = ()

    def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, dtype="int32", embedding_name=None,
                group_name=DEFAULT_GROUP_NAME):
        if embedding_name is None:
            embedding_name = name
        if embedding_dim == "auto":
            embedding_dim = 6 * int(pow(vocabulary_size, 0.25))
        return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim, use_hash, dtype,
                                              embedding_name, group_name)

    def __hash__(self):
        return self.name.__hash__()


class VarLenSparseFeat(namedtuple('VarLenSparseFeat',
                                  ['sparsefeat', 'maxlen', 'combiner', 'length_name', 'weight_name', 'weight_norm'])):
    __slots__ = ()

    def __new__(cls, sparsefeat, maxlen, combiner="mean", length_name=None, weight_name=None, weight_norm=True):
        return super(VarLenSparseFeat, cls).__new__(cls, sparsefeat, maxlen, combiner, length_name, weight_name,
                                                    weight_norm)

    @property
    def name(self):
        return self.sparsefeat.name

    @property
    def vocabulary_size(self):
        return self.sparsefeat.vocabulary_size

    @property
    def embedding_dim(self):
        return self.sparsefeat.embedding_dim

    @property
    def use_hash(self):
        return self.sparsefeat.use_hash

    @property
    def dtype(self):
        return self.sparsefeat.dtype

    @property
    def embedding_name(self):
        return self.sparsefeat.embedding_name

    @property
    def group_name(self):
        return self.sparsefeat.group_name

    def __hash__(self):
        return self.name.__hash__()

class DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype'])):
    __slots__ = ()

    def __new__(cls, name, dimension=1, dtype="float32"):
        return super(DenseFeat, cls).__new__(cls, name, dimension, dtype)

    def __hash__(self):
        return self.name.__hash__()

def build_input_features(feature_columns, prefix=''):
    input_features = OrderedDict()
    for fc in feature_columns:
        if isinstance(fc, SparseFeat):
            input_features[fc.name] = keras.Input(
                shape=(1,), name=prefix + fc.name, dtype=fc.dtype)
        elif isinstance(fc, DenseFeat):
            input_features[fc.name] = keras.Input(
                shape=(fc.dimension,), name=prefix + fc.name, dtype=fc.dtype)
        elif isinstance(fc, VarLenSparseFeat):
            input_features[fc.name] = keras.Input(shape=(fc.maxlen,), name=prefix + fc.name,
                                            dtype=fc.dtype)
            if fc.weight_name is not None:
                input_features[fc.weight_name] = keras.Input(shape=(fc.maxlen, 1), name=prefix + fc.weight_name,
                                                       dtype="float32")
            if fc.length_name is not None:
                input_features[fc.length_name] = keras.Input((1,), name=prefix + fc.length_name, dtype='int32')

        else:
            raise TypeError("Invalid feature column type, got", type(fc))

    return input_features


def embedding_lookup(sparse_embedding_dict, sparse_input_dict, sparse_feature_columns, return_feat_list=(),
                     mask_feat_list=(), to_list=False):
    group_embedding_dict = defaultdict(list)
    for fc in sparse_feature_columns:
        feature_name = fc.name
        embedding_name = fc.embedding_name
        if (len(return_feat_list) == 0 or feature_name in return_feat_list):
            lookup_idx = sparse_input_dict[feature_name]
            group_embedding_dict[fc.group_name].append(sparse_embedding_dict[embedding_name](lookup_idx))
    if to_list:
        return list(chain.from_iterable(group_embedding_dict.values()))
    return group_embedding_dict



class NoMask(keras.layers.Layer):
    def __init__(self, **kwargs):
        super(NoMask, self).__init__(**kwargs)

    def build(self, input_shape):
        # Be sure to call this somewhere!
        super(NoMask, self).build(input_shape)

    def call(self, x, mask=None, **kwargs):
        return x

    def compute_mask(self, inputs, mask):
        return None

def concat_func(inputs, axis=-1, mask=False):
    if not mask:
        inputs = list(map(NoMask(), inputs))
    if len(inputs) == 1:
        return inputs[0]
    else:
        return keras.layers.Concatenate(axis=axis)(inputs)
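
A minimal usage sketch of the helpers above, with a made-up feature name, vocabulary size and dimensions (note that create_embedding_dict as written forces trainable=False, so the resulting table is frozen):

feature_columns = [
    VarLenSparseFeat(SparseFeat('creative_id', vocabulary_size=100, embedding_dim=8), maxlen=5),
]
inputs = build_input_features(feature_columns)      # OrderedDict of keras.Input tensors
embed_dict = create_embedding_dict([], feature_columns, init_std=0.01, seed=2020, l2_reg=1e-6)
seq_embeds = embedding_lookup(embed_dict, inputs, feature_columns, to_list=True)
concat_embed = concat_func(seq_embeds, mask=True)   # shape: (batch, 5, 8), mask preserved
print(concat_embed)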

Data processing

import sys
sys.path.append('./')
import os
import numpy as np
import pandas as pd
import tqdm
import gc
from util import save_pickle, load_pickle, checkout_dir
import datetime

import multiprocessing


def process_na(click_df):
    click_df.fillna(0, inplace=True)
    click_df['product_id'] = click_df['product_id'].replace('\\N', 44315, )
    click_df['industry'] = click_df['industry'].replace('\\N', 336, )
    click_df = click_df.astype(int)
    click_df = click_df.sort_values(by=['user_id', 'time'], )
    return click_df


def gen_sequence(click_df, col, params, category):
    print('========= start ======================', col )
    print('train sentence start ', datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
    col_click = click_df.groupby(['user_id'])[col].agg(list)
    sentence_list = col_click.values.tolist()
    save_pickle(sentence_list, os.path.join(params['processed_data_dir'], '{}_{}.pickle'.format(category, col)))
    print('train sentence done ', datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
    return col_click

def gen_gensim_sentence(params, col, ):
    print(col, 'word2vec sentence start ', datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
    train_sentence_list = load_pickle(os.path.join(params['processed_data_dir'], '{}_{}.pickle'.format('train',col)))
    test_sentence_list = load_pickle(os.path.join(params['processed_data_dir'], '{}_{}.pickle'.format('test',col)))
    sentence_list =  train_sentence_list+test_sentence_list
    print('sentence num: ', col, len(sentence_list))
    sentence_file_path = os.path.join(params['pre_train_embed_dir'], '{}_sentence.pickle'.format(col))
    print(sentence_file_path)
    if params['time_cut_len']==-1:
        with open(sentence_file_path, 'w') as fw:
            for sentence in tqdm.tqdm(sentence_list):
                cut_sent = []
                for w, g in itertools.groupby(sentence):
                    cut_sent.append('id_{}'.format(int(w)))
                fw.writelines(','.join(cut_sent) + '\n')
    else:
        with open(sentence_file_path, 'w') as fw:
            for sentence in tqdm.tqdm(sentence_list):
                cut_sent = []
                i = 0
                for w, g in itertools.groupby(sentence):
                    cut_sent.append('id_{}'.format(int(w)))
                    i+=1
                    if i == params['time_cut_len']:
                        fw.writelines(','.join(cut_sent) + '\n')
                        i=0
                        cut_sent = []
                if len(cut_sent)>0:
                    fw.writelines(','.join(cut_sent) + '\n')
    print('word2vec sentence done ', datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))

# =========================================
import gensim
from gensim.models.callbacks import CallbackAny2Vec
import itertools

class Load_Corpus_with_Iteration(object):  # read the corpus with an iterator
    def __init__(self, path):
        self.path = path

    def __iter__(self):
        for line in open(self.path, ):
            yield line.strip().split(',')

class log_callback(CallbackAny2Vec):
    '''Callback to print loss after each epoch.'''
    def __init__(self):
        self.epoch = 0
        self.loss_to_be_subed = 0

    def on_epoch_end(self, model):
        loss = model.get_latest_training_loss()
        loss_now = loss - self.loss_to_be_subed
        self.loss_to_be_subed = loss
        print('Loss after epoch {}: {}'.format(self.epoch, loss_now))
        self.epoch += 1

def get_word2vec_dictionaries(texts, filepath, embed_size, vocab_size):

    model = gensim.models.Word2Vec(texts, sg=1, iter=10, negative=5, sample=0.00001, size=embed_size,
                                   max_vocab_size=vocab_size, batch_words=10000,
                                   window=10, min_count=1, workers=multiprocessing.cpu_count(), compute_loss=True, callbacks=[log_callback()])
    # model = gensim.models.Word2Vec(texts, iter=10, size=embed_size, max_vocab_size=vocab_size,
    #                                window=10,  workers=6, compute_loss=True, callbacks=[log_callback()])
    vocab_list = [word for word, _ in model.wv.vocab.items()]  # all words in the vocabulary
    print('vocab_list len: ', len(vocab_list))
    embeddings_matrix = np.zeros((vocab_size, embed_size))
    for w in vocab_list:
        embeddings_matrix[int(w.replace('id_', ''))] = model.wv[w]
    save_pickle(embeddings_matrix, filepath)
    return embeddings_matrix

def gensim_pre_train(params ):
    print('=============== gensim train: cut len {} ====='.format(params['time_cut_len']))
    for col in params['pre_train_cols']:
        print('==========={}=========='.format(col))
        sss = datetime.datetime.now()
        pre_train_embed_file_path = os.path.join(params['pre_train_embed_dir'], '{}_embed.pickle'.format(col))

        sentence_file_path = os.path.join(params['pre_train_embed_dir'], '{}_sentence.pickle'.format(col))
        sentences = Load_Corpus_with_Iteration(sentence_file_path)
        embeddings_matrix = get_word2vec_dictionaries(sentences, filepath=pre_train_embed_file_path, embed_size=params['embedding_size_dict'][col], vocab_size=params['embedding_vobsize_dict'][col])
        print(col, embeddings_matrix.shape)
        eee = datetime.datetime.now()
        print('cost time : ', (eee - sss).seconds / 60)
        print('=====================\n')


if __name__=='__main__':
    params = {
        'min_freq':1,
        'speat_feat_cols':['creative_id', 'ad_id', 'product_id', 'product_category', 'advertiser_id', 'industry', 'click_times', 'time'],
        'pre_train_cols': ['creative_id', 'advertiser_id', 'product_category', 'industry', 'product_id' ]
#                 'pre_train_cols': ['creative_id', 'advertiser_id', ]

    }
    params['time_cut_len'] = 100
    params['processed_data_dir'] = '../processed_data/minfreq-{}'.format(params['min_freq'])
    params['pre_train_embed_dir'] = '../pre_trian_emb/gensim/minfreq-{}-cut-{}-sg1-window-10-sample-1e-5/'.format(params['min_freq'], params['time_cut_len'])
    checkout_dir(params['processed_data_dir'], do_delete=False)
    checkout_dir(params['pre_train_embed_dir'], do_delete=False)

    # training data
#     train_user_df = pd.read_csv('../ori_data/train_preliminary/user.csv')
#     train_click_df = pd.read_csv('../ori_data/train_preliminary/click_log.csv')
#     train_ad_df = pd.read_csv('../ori_data/train_preliminary/ad.csv')
# #     fusai_train_user_df = pd.read_csv('../ori_data/train_semi_final/user.csv')
# #     fusai_train_click_df = pd.read_csv('../ori_data/train_semi_final/click_log.csv')
# #     fusai_train_ad_df = pd.read_csv('../ori_data/train_semi_final/ad.csv')
#     # concatenate the training data
#     train_click_df = pd.merge(train_click_df, train_ad_df, on=['creative_id'], )
# #     fusai_train_click_df = pd.merge(fusai_train_click_df, fusai_train_ad_df, on=['creative_id'], )
# #     train_click_df = pd.concat([train_click_df, fusai_train_click_df], axis=0)
# #     train_user_df = pd.concat([train_user_df, fusai_train_user_df], axis=0).drop_duplicates()
# #     del fusai_train_user_df, fusai_train_click_df, fusai_train_ad_df
# #     gc.collect()

#     # test data
#     test_click_df = pd.read_csv('../ori_data/test/click_log.csv')
#     test_ad_df = pd.read_csv('../ori_data/test/ad.csv')
#     test_click_df = pd.merge(test_click_df, test_ad_df, on=['creative_id'], how='left')
#     del test_ad_df
#     gc.collect()

#     # process the data
#     print('process train')
#     train_click_df = process_na(train_click_df)
#     print('user shape:', train_user_df.shape, 'click shape: ',train_click_df.shape, train_click_df['user_id'].unique().shape)
#     print('process test')
#     test_click_df = process_na(test_click_df)
#     print('click shape: ', test_click_df.shape)


#     for col in params['speat_feat_cols']:
#         col_click = gen_sequence(train_click_df, col, params,'train')
#     y = {}
#     col_click = pd.merge(col_click, train_user_df, on='user_id')
#     y['age'] = col_click['age'].values-1
#     y['gender'] = col_click['gender'].values-1
#     file_path = os.path.join(params['processed_data_dir'], 'train_y.pickle')
#     save_pickle(y, file_path)
#     del train_click_df, col_click, y
#     gc.collect()


#     for col in params['speat_feat_cols']:
#         col_click = gen_sequence(test_click_df, col, params,'test')
#     del test_click_df, col_click
#     gc.collect()


#     =================================================================================
    embedding_vobsize_dict = {
        'creative_id': 4445720 + 1,
        'ad_id': 3812202 + 1,
        'product_id': 44315 + 1,
        'product_category': 18 + 1,
        'advertiser_id': 62965 + 1,
        'industry': 336 + 1,
    }
    embedding_size_dict = {
        'creative_id': 128,
        'ad_id':128,
        'product_id': 64,
        'product_category': 64,
        'advertiser_id': 128,
        'industry': 64,
    }
    params['embedding_size_dict'] = embedding_size_dict
    params['embedding_vobsize_dict'] = embedding_vobsize_dict

    for col in params['pre_train_cols']:
        gen_gensim_sentence(params, col, )
    gensim_pre_train(params, )
    print(datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
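
The '{col}_embed.pickle' matrices written above are meant to seed the embedding tables later; a sketch of collecting them into the init_embed_dic argument of create_embedding_dict (paths mirror the ones used in this script):

init_embed_dic = {}
for col in params['pre_train_cols']:
    embed_path = os.path.join(params['pre_train_embed_dir'], '{}_embed.pickle'.format(col))
    init_embed_dic[col] = load_pickle(embed_path)  # numpy array of shape (vocab_size, embed_size)
# init_embed_dic can then be passed to create_embedding_dict(..., init_embed_dic=init_embed_dic)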

Training

# https://developer.nvidia-china.com/forum.php?mod=viewthread&tid=10722&extra=page%3D1
import sys
sys.path.append('./')
import os
import numpy as np
import pandas as pd
from util import save_pickle, load_pickle, checkout_dir

# =============================================================

import tensorflow as tf



params = {
    'target':['age'],
    'epochs':25,
    'batch_size':2,
    'hist_max_len':100,
    'is_train':True,
    'do_delete':True,
    'continue_train':False,

    'min_freq': 0,
    'speat_feat_cols': ['creative_id', 'advertiser_id',  'time'],
    'pre_train_cols': [ ],

    'example_nums':1000
}
params['time_cut_len'] = 100
params['processed_data_dir'] = '../processed_data/minfreq-{}'.format(params['min_freq'])
params['pre_train_embed_dir'] = '../pre_trian_emb/gensim/minfreq-{}-cut-{}-sg1-window-10-sample-1e-5'.format(
    params['min_freq'], params['time_cut_len'])

embedding_vobsize_dict = {
    'creative_id': 4445720 + 1,
    'ad_id': 3812202 + 1,
    'product_id': 44315 + 1,
    'product_category': 18 + 1,
    'advertiser_id': 62965 + 1,
    'industry': 336 + 1,
    'time':91+1,
}
embedding_size_dict = {
    'creative_id': 128,
    'ad_id': 128,
    'product_id': 64,
    'product_category': 64,
    'advertiser_id': 128,
    'industry': 64,
    'time':64
}

params['embedding_size_dict'] = embedding_size_dict
params['embedding_vobsize_dict'] = embedding_vobsize_dict

params['d_model'] = sum([v for k, v in params['embedding_size_dict'].items() if k in params['speat_feat_cols']])
print('input embedding size is :', params['d_model'])

# ================================================================================
print('Train dataset...')

record_defaults = ['', '', '', 0, 0]
def decode_features_line(line):
    items = tf.io.decode_csv(line, record_defaults, field_delim=';')
    features = []
    for i, col in enumerate(params['speat_feat_cols']):
        t = tf.strings.split([items[i]], sep=',').values
        t = tf.strings.to_number(t, tf.int32)
        features.append(t)
    return features

def _convert_features_to_dict(*el):
    base_list = params['speat_feat_cols']
    dicto = dict()
    for i in range(len(base_list)):
        dicto[base_list[i]] = el[i]
    # add transformer mask
    dicto['transformer_mask'] = tf.cast(tf.equal(el[0], 0), dtype=tf.int32)
    dicto['dynamic_rnn_len'] = tf.reduce_sum(tf.cast(tf.not_equal(el[0], 0), dtype=tf.int32), axis=-1)
    return dicto

def decode_labels_line(line):
    items = tf.io.decode_csv(line, record_defaults, field_delim=';')
    labels = []
    i = -2
    for col in params['target']:
        labels.append(items[i])
        i+=1
    # label = tf.one_hot(items[-1], depth=10)
    return labels

def _convert_labels_to_dict(*el):
    base_list = params['target']
    dicto = dict()
    for i in range(len(base_list)):
        dicto[base_list[i]] = el[i]
    return dicto


tfdata_train_file_path = '../processed_data/tfdata/train.csv'
data_set = tf.data.TextLineDataset(tfdata_train_file_path,  )
features_data_set = data_set.map(decode_features_line)
features_data_set = features_data_set.padded_batch(batch_size=params['batch_size'],
                                                   padded_shapes=(
                                                    tf.TensorShape([None,]),
                                                    tf.TensorShape([None,]),
                                                    tf.TensorShape([None,])
                                                    )
                                                   )

features_data_set = features_data_set.map(map_func=_convert_features_to_dict)

label_data_set = data_set.map(decode_labels_line)
label_data_set = label_data_set.batch(batch_size=params['batch_size'])
label_data_set = label_data_set.map(_convert_labels_to_dict)
final_data_set = tf.data.Dataset.zip((features_data_set, label_data_set))


# Note: Dataset.make_one_shot_iterator() and tf.Session() are TF 1.x APIs and are gone in TF 2.x
# (the graph-mode equivalent would be tf.compat.v1.data.make_one_shot_iterator plus a tf.compat.v1.Session);
# under eager execution the dataset is simply iterated directly, as below.



for i, item in enumerate(final_data_set):
    if i==5:
        break
    print('======', i)
    print(item)
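
Each line of train.csv is expected to carry one ';'-separated field of comma-joined IDs per entry in speat_feat_cols (creative_id, advertiser_id, time) followed by the integer age and gender labels, matching record_defaults. A quick way to sanity-check the decoders on made-up data (hypothetical file path and IDs):

smoke_path = '../processed_data/tfdata/train_smoke.csv'  # hypothetical path, not the real training file
os.makedirs(os.path.dirname(smoke_path), exist_ok=True)
with open(smoke_path, 'w') as fw:
    fw.write('12,507,9931;88,88,102;3,3,5;4;1\n')  # creative_id;advertiser_id;time;age;gender
    fw.write('77,13;45,9;10,12;7;2\n')
for line in tf.data.TextLineDataset(smoke_path).take(2):
    print(decode_features_line(line), decode_labels_line(line))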

node2vec

import sys
import os
import numpy as np
import pandas as pd
from util import save_pickle, load_pickle, checkout_dir, create_embedding_dict
import itertools
from collections import defaultdict
import datetime

import networkx as nx



import tqdm

def creat_graph_file(speat_feat_cols, params, pre_train_embed_dir):
    train_y, train_feats_dict, train_user_df = load_pickle(os.path.join(params['processed_data_dir'], 'train.pickle'))
    test_y, test_feats_dict, test_user_df = load_pickle(os.path.join(params['processed_data_dir'], 'test.pickle'))

    for name in speat_feat_cols:
        print(name)
        res = defaultdict(int)
        for arr in tqdm.tqdm(train_feats_dict[name]):
            new_values = [k for k, g in itertools.groupby(arr) ]
            for i in range(1, len(new_values)):
                res[(new_values[i-1], new_values[i])] += 1
        for arr in tqdm.tqdm(test_feats_dict[name]):
            new_values = [k for k, g in itertools.groupby(arr) ]
            for i in range(1, len(new_values)):
                res[(new_values[i-1], new_values[i])] += 1
        with open(os.path.join(pre_train_embed_dir, '{}.graph'.format(name)), 'w', encoding='utf-8') as fw:
            for k, v in res.items():
                fw.writelines('{} {} {}\n'.format(k[0], k[1], v))


params = {
    'walklen': 50,
    'num_walks': 3,
    'p': 2,
    'q': 0.5,
    'min_freq': -1,
}
params['processed_data_dir'] = '../processed_data/minfreq-{}'.format(params['min_freq'])
params['vobsize_file_path'] = '../encode_data/vobsize.csv'

embedding_vobsize_dict = {}
with open(params['vobsize_file_path'], 'r') as fr:
    for line in fr.readlines():
        line = line.strip().split(':')
        if len(line) == 2:
            embedding_vobsize_dict[str(line[0])] = int(line[1]) + 1
print(embedding_vobsize_dict)
embedding_size_dict = {
    'creative_id': 128,
    'ad_id': 128,
    'product_id': 64,
    'product_category': 32,
    'advertiser_id': 128,
    'industry': 32,
}

# speat_feat_cols = ['creative_id', 'product_id', 'advertiser_id']
speat_feat_cols = ['advertiser_id', 'creative_id', ]
speat_feat_cols = ['creative_id', ]

pre_train_embed_dir = '../pre_trian_emb/node2vec/first-sg1-walken{}-num_walks{}-p{}-q{}'.format(params['walklen'], params['num_walks'], params['p'], params['q'])
checkout_dir(pre_train_embed_dir, do_delete=False)
creat_graph_file(speat_feat_cols, params, pre_train_embed_dir)
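
creat_graph_file only writes the weighted edge lists; the random walks and the embedding training themselves are not shown here. A minimal sketch of that missing step, assuming the '{name}.graph' files written above and a gensim 3.x API, and using plain weight-biased first-order walks rather than the full p/q-biased second-order walks the 'p'/'q' params suggest:

import random
import gensim

def simple_weighted_walks(graph_path, num_walks, walk_len):
    # load the 'src dst weight' edge list produced by creat_graph_file
    G = nx.read_weighted_edgelist(graph_path, create_using=nx.DiGraph)
    walks = []
    for _ in range(num_walks):
        for node in G.nodes():
            walk = [node]
            for _ in range(walk_len - 1):
                nbrs = list(G[walk[-1]])
                if not nbrs:
                    break
                weights = [G[walk[-1]][n]['weight'] for n in nbrs]
                walk.append(random.choices(nbrs, weights=weights)[0])
            walks.append(walk)
    return walks

walks = simple_weighted_walks(os.path.join(pre_train_embed_dir, 'creative_id.graph'),
                              params['num_walks'], params['walklen'])
w2v = gensim.models.Word2Vec(walks, sg=1, size=embedding_size_dict['creative_id'],
                             window=10, min_count=1, iter=5, workers=4)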
