Face Recognition with PyQt, OpenCV, and a Neural Network

The article 《基于pyqt+OpenCV设计的用户密码或人脸识别登录GUI程序设计》 (a PyQt + OpenCV login GUI with password or face recognition) demonstrated face recognition with PyQt and OpenCV. That article relied entirely on the face-recognition interfaces OpenCV provides: face-image capture, model-parameter learning, and face inference (recognition).

Building on that article, we have made a few changes:

  • OpenCV is still used to capture (and save) the face photos.
  • The Python imaging library PIL resizes each photo to a fixed size (e.g. 150x150).
  • We build the face dataset ourselves:

       (1) PIL converts each picture into one-dimensional pixel data (values 0-255); the pixels of one picture form one sample (see the sketch after this list).

       (2) All image data are packed into a pkl file, which serves as the input data for neural-network training.

  • We implement the parameter-learning algorithm ourselves: a two-layer neural network whose weight parameters are updated by numerical differentiation or by error backpropagation.
  • We implement face inference (recognition) ourselves: softmax is the output-layer activation function, and the index of the largest output is the recognition result.
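
To make step (1) concrete, here is a minimal sketch of how one saved 150x150 photo becomes one training sample (the example file name is hypothetical; the full loop appears in Training_faces() below):

# Sketch: one 150x150 grayscale photo -> one 1-D sample of 22500 pixels (0-255)
import numpy as np
from PIL import Image

img = Image.open("Facedata/User.0.1.jpg").convert('L')  # hypothetical example file
sample = np.array(img, 'uint8').flatten()               # shape (22500,): one sample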

After reading 《基于pyqt+OpenCV设计的用户密码或人脸识别登录GUI程序设计》 and this article, you will take away the following:

  1. Build GUI programs with PyQt
  2. Implement face recognition with the help of the OpenCV library
  3. Convert pictures to data and normalize pictures to a fixed size
  4. Put a self-built neural network, or another machine-learning algorithm, to work in a real face-recognition application
  5. Turn plain black-and-white pictures into the form you need as machine-learning input data
  6. Convert between class labels (integer ids) and one-hot vectors yourself (a stand-alone sketch follows this list)
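
Item 6 is handled inline in TwoLayerNet.gradient() below; as a stand-alone sketch (the three-class toy labels are assumptions for illustration), the two directions look like this:

# Sketch: integer labels <-> one-hot (assumes labels in 0..num_classes-1)
import numpy as np

labels = np.array([0, 2, 1])                  # integer class ids
one_hot = np.zeros((labels.size, 3))          # 3 = number of classes
one_hot[np.arange(labels.size), labels] = 1   # a 1 in each sample's correct-class column

back = np.argmax(one_hot, axis=1)             # one-hot -> integer ids again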

The source code is given directly below. Since the code is extensively commented, no additional program walkthrough is provided; readers can locate the code that is useful for their own needs and analyze and reuse it.

Note:

For the complete program package and screenshots of the software in operation, see the original article.
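
For orientation, the listing below spans several source files. The layout sketched here is inferred from the file-header comments and the paths used in the code; the name of the main script is an assumption, and the two image folders must exist before capture:

main.py               # the main/startup program (first listing below; file name assumed)
faces_input_frame.py  # the face-enrollment dialog
face_recognize.py     # capture, dataset building, training, and recognition
functions.py          # activation functions, loss, and numerical gradient
Two_layer_net.py      # the TwoLayerNet model
user_names.txt        # comma-separated user names (password.txt holds the password)
Facedata/             # captured training photos (User.<id>.<n>.jpg)
Recognize_img/        # photos captured during recognition
faces_data.pkl        # generated: the packed training set
faces_params.pkl      # generated: the learned weights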

# coding: utf-8
#---------- main program (also the startup script) -------------------------
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox,QWidget
from faces_input_frame import Ui_Dialog
import os

class Ui_Form(QWidget):  # changed from object to QWidget so that message boxes can pop up
    def __init__(self):
        super(Ui_Form, self).__init__()  # user-added code
    def setupUi(self, Form):
        self.form = Form  # user-added code
        Form.setObjectName("Form")
        Form.setMinimumSize(QtCore.QSize(329, 230))
        Form.setMaximumSize(QtCore.QSize(400, 230))
        Form.setStyleSheet("")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(63, 43, 64, 16))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(63, 80, 48, 16))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.lineEdit_2 = QtWidgets.QLineEdit(Form)
        self.lineEdit_2.setGeometry(QtCore.QRect(121, 80, 133, 20))
        self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
        self.lineEdit_2.setCursorPosition(0)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(70, 150, 75, 23))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton.setFont(font)
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(170, 150, 75, 23))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setObjectName("pushButton_2")
        self.checkBox = QtWidgets.QCheckBox(Form)
        self.checkBox.setGeometry(QtCore.QRect(63, 110, 151, 20))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.checkBox.setFont(font)
        self.checkBox.setObjectName("checkBox")
        self.lineEdit_3 = QtWidgets.QLineEdit(Form)
        self.lineEdit_3.setGeometry(QtCore.QRect(121, 41, 133, 20))
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.pushButton_face_pass = QtWidgets.QPushButton(Form)
        self.pushButton_face_pass.setGeometry(QtCore.QRect(279, 100, 104, 41))
        self.pushButton_face_pass.setMaximumSize(QtCore.QSize(16777215, 16777213))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton_face_pass.setFont(font)
        self.pushButton_face_pass.setObjectName("pushButton_face_pass")
        self.pushButton_face_input = QtWidgets.QPushButton(Form)
        self.pushButton_face_input.setGeometry(QtCore.QRect(280, 31, 104, 41))
        self.pushButton_face_input.setMaximumSize(QtCore.QSize(16777215, 16777213))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButton_face_input.setFont(font)
        self.pushButton_face_input.setObjectName("pushButton_face_input")

        self.retranslateUi(Form)
        self.pushButton.clicked.connect(self.close)
        self.pushButton_2.clicked.connect(self.open)
        self.pushButton_face_input.clicked.connect(self.faceinput)
        self.pushButton_face_pass.clicked.connect(self.facepass)

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "User name:"))
        self.label_2.setText(_translate("Form", "Password:"))
        self.pushButton.setText(_translate("Form", "Cancel"))
        self.pushButton_2.setText(_translate("Form", "OK"))
        self.checkBox.setText(_translate("Form", "Remember user name and password"))
        self.pushButton_face_pass.setText(_translate("Form", "Face-recognition login"))
        self.pushButton_face_input.setText(_translate("Form", "Enroll face"))

    def open(self):
        # -------- check whether the user exists --------------
        fl = open('user_names.txt', 'r', encoding='utf-8')
        pre_name = fl.read()
        print(pre_name)

        names = pre_name.split(',')
        fl.close()
        if self.lineEdit_3.text() in names:
            fl = open('password.txt', 'r')
            password = fl.read().strip()  # drop a possible trailing newline
            fl.close()
            if self.lineEdit_2.text() == password:
                reply = QMessageBox.information(self, 'Notice', 'Write the main program here', QMessageBox.Close)
            else:
                reply = QMessageBox.information(self, 'Notice', 'Wrong password', QMessageBox.Close)
        else:
            reply = QMessageBox.information(self, 'Notice', 'User does not exist', QMessageBox.Close)

    def close(self):
        self.form.close()  # close the login window (calling self.close() here would recurse forever)
    def faceinput(self, event):
        self.form.hide()
        Form1 = QtWidgets.QDialog()
        ui = Ui_Dialog()
        ui.setupUi(Form1)
        Form1.show()
        Form1.exec_()
        self.form.show()  # once the child dialog closes, show the main window again

    def facepass(self, event):
        import face_recognize
        get_name = face_recognize.recognize_face()  # returns the recognized name
        if get_name == "unknown":
            reply = QMessageBox.information(self, 'Notice', 'Face recognition failed', QMessageBox.Close)
        else:
            reply = QMessageBox.information(self, 'Notice', 'Welcome: ' + get_name, QMessageBox.Close)
            print("continue with the rest of the program here")


if __name__=="__main__":
    import sys
    app=QtWidgets.QApplication(sys.argv)
    widget=QtWidgets.QWidget()
    ui=Ui_Form()
    ui.setupUi(widget)
    widget.show()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
#------------ faces_input_frame.py -----------
# Form implementation generated from reading ui file 'faces_input_frame.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
import face_recognize
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox,QWidget
class Ui_Dialog(QWidget):
    def setupUi(self, Dialog):
        self.form=Dialog  # user-added code
        Dialog.setObjectName("Dialog")
        Dialog.resize(315, 104)
        Dialog.setMinimumSize(QtCore.QSize(315, 104))
        Dialog.setMaximumSize(QtCore.QSize(315, 104))
        Dialog.setAutoFillBackground(False)
        self.Button_Enter = QtWidgets.QPushButton(Dialog)
        self.Button_Enter.setGeometry(QtCore.QRect(221, 21, 75, 27))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Button_Enter.setFont(font)
        self.Button_Enter.setObjectName("Button_Enter")
        self.Button_Exit=QtWidgets.QPushButton(Dialog)
        self.Button_Exit.setGeometry(QtCore.QRect(221, 54, 75, 27))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Button_Exit.setFont(font)
        self.Button_Exit.setObjectName("Button_Exit")
        self.face_name=QtWidgets.QLabel(Dialog)
        self.face_name.setGeometry(QtCore.QRect(40, 20, 131, 16))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.face_name.setFont(font)
        self.face_name.setObjectName("face_name")
        self.face_name_frame=QtWidgets.QLineEdit(Dialog)
        self.face_name_frame.setGeometry(QtCore.QRect(30, 40, 167, 31))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.face_name_frame.setFont(font)
        self.face_name_frame.setText("")
        self.face_name_frame.setObjectName("face_name_frame")

        self.retranslateUi(Dialog)
        self.Button_Enter.clicked.connect(self.Enter)
        self.Button_Exit.clicked.connect(self.ext)
    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.Button_Enter.setText(_translate("Dialog", "OK"))
        self.Button_Exit.setText(_translate("Dialog", "Exit"))
        self.face_name.setText(_translate("Dialog", "Please enter your name:"))

    def Enter(self):
        if self.face_name_frame.text() == "":
            # empty input
            reply = QMessageBox.information(self, 'Notice', 'Please enter a valid user name', QMessageBox.Ok)
        else:
            fl = open('user_names.txt', 'a+', encoding='utf-8')  # same encoding as the reader side
            fl.write(self.face_name_frame.text() + ',')
            fl.close()
            print("Capturing face photos")
            face_recognize.Collect_faces()
            print("Building the face dataset")
            face_recognize.Training_faces()
            print("Training the model")
            face_recognize.faces_parms()
            print("Training finished")

    def ext(self,event):
        self.form.close()

if __name__ == "__main__":
    import sys
    app=QtWidgets.QApplication(sys.argv)
    Dialog=QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
#--------- face_recognize.py -----------------
from functions import *
def Collect_faces():
    # this function only captures face images and saves them as 150x150 pictures
    import cv2
    from PIL import Image
    cap = cv2.VideoCapture(0)  # 0 = the laptop's built-in camera; use 1, 2, ... for other cameras

    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    fl = open('user_names.txt', 'r', encoding='utf-8')
    pre_name = fl.read()
    name = pre_name.split(',')
    face_id = len(name) - 2  # the trailing comma yields an empty last element, so the newest user's 0-based id is len(name) - 2
    fl.close()
    # face_id = input('\n enter user id:')  # alternative: type the id labelling this person's series of photos

    print('\n Initializing face capture. Look at the camera and wait ...')

    count = 0

    while True:

        # read a frame from the camera
        success, img = cap.read()

        # convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # detect faces
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0))  # box around the face; bottom-right corner is (x + w, y + h)
            count += 1
            # save the image: crop the face from the frame, then re-save it at 150x150
            cv2.imwrite("Facedata/User." + str(face_id) + '.' + str(count) + '.jpg', gray[y: y + h, x: x + w])  # original face crop
            PIL_img = Image.open("Facedata/User." + str(face_id) + '.' + str(count) + '.jpg').resize((150, 150), Image.LANCZOS).convert('L')  # 150x150 black-and-white (LANCZOS was called ANTIALIAS in older Pillow)
            PIL_img.save("Facedata/User." + str(face_id) + '.' + str(count) + '.jpg')  # overwrite the original crop with the 150x150 version

            cv2.imshow('image', img)

        # keep the preview window updating
        k = cv2.waitKey(1)

        if k == 27:  # press Esc to stop capturing
            break

        elif count >= 150:  # stop after collecting 150 samples
            break

    # release the camera
    cap.release()
    cv2.destroyAllWindows()


def Training_faces():
    from PIL import Image
    import numpy as np
    import pickle
    faceSamples = []  # list of face samples
    faceLables = []   # face labels (integer ids, not one-hot)
    fl = open('user_names.txt', 'r', encoding='utf-8')
    pre_name = fl.read()
    name = pre_name.split(',')
    face_id = len(name) - 1  # the trailing comma yields an empty last element, so this is the number of users
    fl.close()
    print(face_id)
    for j in range(face_id):   # one pass per user
        for i in range(150):   # 150 photos per user (count = 150 above)
            PIL_img = Image.open("Facedata/User." + str(j) + '.' + str(i + 1) + '.jpg').resize((150, 150), Image.LANCZOS).convert('L')  # 150x150 black-and-white
            pre_img_numpy = np.array(PIL_img, 'uint8')   # picture -> array
            flatten_img_numpy = pre_img_numpy.flatten()  # flatten to 1-D
            faceSamples.append(flatten_img_numpy)        # one sample per picture
            faceLables.append(j)                         # one label per picture

    array_faces_data = np.array(faceSamples)    # list -> numpy array
    array_faces_labels = np.array(faceLables)   # as many labels as there are pictures
    data = {'train_img': array_faces_data, 'train_label': array_faces_labels}  # pack as a dictionary (avoid shadowing the built-in dict)
    pickle.dump(data, open('faces_data.pkl', 'wb'))  # save the face dataset


def faces_parms():
    # model-parameter learning
    import pickle
    import numpy as np
    from Two_layer_net import TwoLayerNet
    # load the dataset
    origin_data = pickle.load(open("faces_data.pkl", 'rb'))
    train_img = origin_data['train_img']      # samples
    train_label = origin_data['train_label']  # labels

    # number of users (= number of classes)
    fl = open('user_names.txt', 'r', encoding='utf-8')
    pre_name = fl.read()
    name = pre_name.split(',')
    user_num = len(name) - 1  # the trailing comma yields an empty last element
    fl.close()
    # input layer: one neuron per pixel (len(train_img[1]) = 150*150 = 22500);
    # hidden layer: 5 neurons; output layer: one neuron per user
    network = TwoLayerNet(input_size=len(train_img[1]), hidden_size=5, output_size=user_num)

    iters_num = 10000  # number of gradient-descent updates
    train_size = train_img.shape[0]  # number of samples
    batch_size = 5  # mini-batch size
    learning_rate = 0.1
    train_loss_list = []

    for i in range(iters_num):
        batch_mask = np.random.choice(train_size, batch_size)  # pick batch_size random sample indices
        x_batch = train_img[batch_mask]    # the mini-batch of samples
        t_batch = train_label[batch_mask]  # and their correct labels
        # compute the gradient (timed, so the two methods can be compared)
        import time
        start = time.perf_counter()  # time.clock() no longer exists in Python 3.8+
        # grad = network.numerical_gradient(x_batch, t_batch)  # numerical differentiation (very slow)
        grad = network.gradient(x_batch, t_batch)  # error backpropagation
        end = time.perf_counter()

        # update the parameters
        for key in ('W1', 'b1', 'W2', 'b2'):
            network.params[key] -= learning_rate * grad[key]

        # record the learning progress
        loss = network.loss(x_batch, t_batch)
        train_loss_list.append(loss)

    # save the model parameters
    pickle.dump(network.params, open('faces_params.pkl', 'wb'))

def recognize_face():
    import cv2
    import numpy as np
    from PIL import Image
    import pickle
    cap = cv2.VideoCapture(0)  # 0 = the laptop's built-in camera; use 1, 2, ... for other cameras

    face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    fl = open('user_names.txt', 'r', encoding='utf-8')
    pre_name = fl.read()
    name = pre_name.split(',')  # name[i] is the user with class label i
    fl.close()

    # load the learned model parameters once, outside the capture loop
    faces_params = pickle.load(open("faces_params.pkl", 'rb'))
    W1, W2 = faces_params['W1'], faces_params['W2']
    b1, b2 = faces_params['b1'], faces_params['b2']

    count = 0

    while True:

        # read a frame from the camera
        result = "unknown"
        success, img = cap.read()

        # convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # detect faces
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0))  # box around the face; bottom-right corner is (x + w, y + h)
            count += 1
            # save the face to recognize at 150x150, exactly as during training
            cv2.imwrite("Recognize_img/" + str(count) + '.jpg', gray[y: y + h, x: x + w])  # original face crop
            PIL_img = Image.open("Recognize_img/" + str(count) + '.jpg').resize((150, 150), Image.LANCZOS).convert('L')  # 150x150 black-and-white (LANCZOS was called ANTIALIAS in older Pillow)
            PIL_img.save("Recognize_img/" + str(count) + '.jpg')  # overwrite the crop with the 150x150 version

            # --------------------- run the recognition ------------------------------
            pre_img_numpy = np.array(PIL_img, 'uint8')   # picture -> array
            flatten_img_numpy = pre_img_numpy.flatten()  # flatten to 1-D: the data to classify
            # predict: the same forward pass as TwoLayerNet.predict()
            a1 = np.dot(flatten_img_numpy, W1) + b1  # weighted sum: xw + b
            z1 = sigmoid(a1)                         # activation: maps to 0-1
            a2 = np.dot(z1, W2) + b2
            y = softmax(a2)                          # output-layer activation
            print(y)
            if y.max() > 0.55:  # accept only when the top class probability exceeds 0.55
                p = np.argmax(y)  # index of the most probable class
                result = name[p]  # the recognized name
                cap.release()
                cv2.destroyAllWindows()  # release the camera

                return result

        # keep the preview window updating
        cv2.imshow('image', img)
        k = cv2.waitKey(1)

        if k == 27:  # press Esc to stop
            break

        elif count >= 5:  # give up after 5 recognition attempts
            break

    # release the camera
    cap.release()
    cv2.destroyAllWindows()
    return result  # the recognized name, or "unknown"
# coding: utf-8
#-------- functions.py ----------
import numpy as np
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_grad(x):
    return (1.0 - sigmoid(x)) * sigmoid(x)

def softmax(x):
    # an earlier article discussed this improved softmax, which prevents overflow of large values
    if x.ndim == 2:   # 2-D case, with overflow countermeasure
        x = x.T       # transpose
        x = x - np.max(x, axis=0)  # axis=0: subtract each column's maximum
        y = np.exp(x) / np.sum(np.exp(x), axis=0)  # axis=0: sum the values in each column
        return y.T    # transpose back

    x = x - np.max(x)  # 1-D case, with overflow countermeasure
    return np.exp(x) / np.sum(np.exp(x))
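
# A quick sanity check of the overflow countermeasure (illustrative only, not
# called by the project code): np.exp(1000.0) overflows to inf, but subtracting
# the max first leaves the result unchanged:
#   softmax(np.array([1000.0, 1000.0]))  ->  array([0.5, 0.5])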

def cross_entropy_error(y, t):
    # t holds integer class labels; y[np.arange(batch_size), t] picks, for each
    # sample, the predicted probability of its correct class
    if y.ndim == 1:  # a single sample
        t = t.reshape(1, t.size)  # reshape (n,) into (1, n)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]  # mini-batch size
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size  # cross-entropy error (the larger the correct-class probability, the smaller the error)
def numerical_gradient(f, x):
    # gradient by numerical differentiation; np.nditer visits every element, so
    # this works for both 1-D bias vectors and 2-D weight matrices
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = tmp_val + h
        fxh1 = f(x)  # f(x+h)
        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad
# coding: utf-8
#---------- Two_layer_net.py -----------------
import numpy as np
from functions import *

class TwoLayerNet:
    # a two-layer network: three layers of neurons, but only two of them carry weights;
    # for background on neural networks, see the author's earlier articles
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # initialize the weights: input size, hidden-layer size, output-layer size
        self.params = {}  # holds the network's parameters
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)  # biases start at 0
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
    def predict(self, x):
        # inference; x is the image data (train_img)
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1  # weighted sum: xw + b
        z1 = sigmoid(a1)         # activation: maps to 0-1
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)          # output-layer activation

        return y                 # one score per class
        
    # x: input data, t: supervision (label) data
    def loss(self, x, t):
        y = self.predict(x)

        return cross_entropy_error(y, t)  # the value of the loss function
    
    def accuracy(self, x, t):
        # note: this method expects one-hot labels t (unlike loss(), which takes integer labels)
        y = self.predict(x)
        y = np.argmax(y, axis=1)  # index of the largest output
        t = np.argmax(t, axis=1)  # position of the correct label

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy           # recognition accuracy
        
    # x: input data, t: supervision (label) data
    def numerical_gradient(self, x, t):
        # numerical differentiation
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads


    def gradient(self, x, t):
        # error backpropagation through the Affine/Softmax layers
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = x.shape[0]

        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward

        one_hot_t = np.zeros_like(y)  # an all-zero array with the same shape as y
        for j, i in zip(range(t.size), t):
            one_hot_t[j][i] = 1       # integer labels -> one-hot: j is the sample index, i the class index
        dy = (y - one_hot_t) / batch_num  # gradient at the softmax-with-loss output
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
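
Since TwoLayerNet provides both numerical_gradient() and the backprop-based gradient(), a natural sanity test is a gradient check. The snippet below is a minimal sketch, not part of the project files; the toy sizes, random data, and seed are assumptions chosen only to keep the check fast.

# coding: utf-8
#-------- gradient check (illustrative sketch) --------
import numpy as np
from Two_layer_net import TwoLayerNet

np.random.seed(0)                    # assumption: fixed seed for reproducibility
network = TwoLayerNet(input_size=10, hidden_size=5, output_size=3)
x = np.random.rand(4, 10)            # 4 toy "images" of 10 pixels each
t = np.array([0, 2, 1, 0])           # integer labels, as loss() expects

grad_backprop = network.gradient(x, t)             # error backpropagation
grad_numerical = network.numerical_gradient(x, t)  # numerical differentiation

for key in ('W1', 'b1', 'W2', 'b2'):
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ":", diff)           # should be very small, e.g. below 1e-8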

Welcome to follow the WeChat official account “Py生活”: learn something, and enjoy life.
