Face sharpness metrics --- detecting hazy (defocused) photos

Experiment exploration notes:

Backlit-shot dataset:
1.sort ---1   eye+eyebrow
2.tenengrad ---1   eyebrow
3.gradient ---1   eyebrow
4.ssim ---1   eyebrow

Tried:
1. Training a classification model on histogram patterns: does not work;
2. Computing a backlight-degree measure: feasible under strong backlight, but it does not work on our data either (the scene does not apply);

Problems:
1. The eyes in the photos are open, so the eye ROI does not work well; --1 use the eyebrow instead
2. The eyebrow ROI has confounding factors, e.g. penciled-in eyebrows; --1 acceptable
3. sort performs very differently on the two datasets; --0 need real production data
4. At the moment only sort is really effective. --1 add more features


To do:
1. Fix the thresholds, measure each metric's accuracy, analyze the causes of errors; --1
2. Combine several metrics and train a classification model; --1
3. Normalize the metric values and build a regression model; --0 (labels are hard to annotate)
4. Mine new features --1


1. Frequency domain --1  add an FFT low-pass filter, then compute sort on the filtered image
2. ssim with whitening --  blur / increased brightness  --1  ineffective
3. Sharpening --1  ineffective
4. Enlarging the ROI --1  ineffective
5. Test the new accuracy --1
6. Collect new data and train on it --0

Final implementation: crop the eyebrow ROI, compute the various features on it, and feed the features into an SVM for training and prediction. (A minimal end-to-end sketch follows; the step-by-step scripts come after it.)
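A minimal single-image sketch of the whole pipeline, assuming the helper functions from the steps below (face_detect, get_roi, get_feature, normal2) have been gathered into one script and a model has already been trained in step 3; the file names here are placeholders:

import cv2
import numpy as np

# face_detect / get_roi come from step 1, get_feature / normal2 from steps 2 and 3
svm = cv2.ml.SVM_load("data/train/txt2/model1/model_5.xml")   # model trained in step 3

pic = cv2.imread("test.jpg")                     # placeholder input image
img_face, points = face_detect(pic)              # 68 dlib landmarks
if len(points) != 0:
    roi = get_roi(pic, points)                   # eyebrow ROI
    cv2.imwrite("roi_tmp.jpg", roi)              # get_feature() reads from a file path
    feats = np.array([get_feature("roi_tmp.jpg")], dtype='float32')
    feats = normal2(feats)                       # same normalization stats as training
    _, res = svm.predict(feats)
    print("clear" if int(res[0][0]) == 1 else "blur")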

1. Crop the ROI

import os
import cv2
import dlib
import numpy as np


def get_roi(img,arr):
    # crop a rectangular ROI from the 68 dlib landmark coordinates in arr
    image=np.copy(img)

    # # roi1 -- eye region
    # left_point1 = (arr[17][0], arr[17][1])
    # left_point2 = (arr[21][0], arr[28][1])

    # left_w=left_point2[0] - left_point1[0]
    # left_h=left_point2[1] - left_point1[1]

    #
    # # roi2 -- eyebrow
    # left_point1 = (arr[18][0], arr[19][1]-30)
    # left_point2 = (arr[21][0], arr[17][1])
    #
    # left_w=left_point2[0] - left_point1[0]
    # left_h=left_point2[1] - left_point1[1]

    # # roi3 -- face region 1
    # left_point1 = (arr[1][0], arr[20][1]-100)
    # left_point2 = (arr[15][0], arr[8][1])
    #
    # left_w=left_point2[0] - left_point1[0]
    # left_h=left_point2[1] - left_point1[1]

    # roi4 -- face region 2 (the variant actually used: eyebrow strip from points 18-25 down to nose-bridge point 28)
    left_point1 = (arr[18][0], arr[20][1]-50)
    left_point2 = (arr[25][0], arr[28][1])

    left_w=left_point2[0] - left_point1[0]
    left_h=left_point2[1] - left_point1[1]

    crop_left = image[left_point1[1]:left_point1[1]+left_h, left_point1[0]:left_point1[0]+left_w]
    # crop_right = image[right_point1[1]:right_point1[1]+right_h, right_point1[0]:right_point1[0]+right_w]

    return crop_left
   
def face_detect(pic):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('class/shape_predictor_68_face_landmarks.dat')
    img = np.copy(pic)
    # img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # detected face rectangles
    points = []
    rects = detector(img, 1)
    if len(rects)!=1:
        print("face detection failed!")

    else:
        landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[0]).parts()])
        for idx, point in enumerate(landmarks):
            x = point[0, 0]
            y = point[0, 1]
            points.append([x, y])

            # draw the landmark points and their indices
            cv2.circle(img, (x,y), 9, (0, 255, 0), thickness=-1, lineType=cv2.FILLED)
            cv2.putText(img, str(idx), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 2,cv2.LINE_AA)

    return img, np.array(points)



n=0
path = "img/0/"
save="img/12/"
for root,dirs,files in os.walk(path):
    for file in files:
        if file.endswith('jpg'):
            file_name=root+"/"+file
            n+=1
            print(n,file)
            # face detection, get the landmark coordinates
            pic=cv2.imread(file_name)
            img_face,point=face_detect(pic)

            if len(point)!=0:
                # 1. crop the ROI
                crop_left=get_roi(pic,point)
                # skip empty or badly proportioned crops
                if crop_left.shape[0]==0 or crop_left.shape[1]/crop_left.shape[0]>5:
                    print("bad crop size, skipping!")
                    continue
                cv2.imwrite(save+file,crop_left)

2. Compute all the features

import cv2
import numpy as np
from PIL import Image
import os
import shutil
import matplotlib.pyplot as plt
from skimage import measure

# 1. grad: mean and variance of the Sobel-x gradient, ignoring dark background and saturated edges
def del_back(gray,sobelx):
    w=gray.shape[0]
    h=gray.shape[1]

    data=[]
    for i in range(w):
        for j in range(h):
            # keep the gradient only where the pixel is not background (>30)
            # and the gradient is not saturated (<250)
            if gray[i,j]>30 and sobelx[i,j]<250:
                data.append(sobelx[i,j])

    return data

def get_grad(file_name,angel):
    # read the image as grayscale
    gray = Image.open(file_name).convert('L')

    # rotate the image (angel is the rotation angle in degrees)
    # angle=[30,60,90,120,150,180]
    gray = gray.rotate(angel)
    gray=np.array(gray)

    # horizontal Sobel gradient
    sobelx=cv2.Sobel(gray,cv2.CV_64F,dx=1,dy=0)
    sobelx=cv2.convertScaleAbs(sobelx)

    data=del_back(gray,sobelx)
    grad1=np.mean(data)
    grad2=np.var(data)

    # cv2.imshow("img2",gray)
    # cv2.waitKey(0)

    return grad1,grad2

# 2. tenengrad: mean of the squared Sobel gradient magnitude
def tenengrad1(img, ksize=3):
    Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    FM = Gx*Gx + Gy*Gy
    mn = cv2.mean(FM)[0]

    # # variant 2: sum of sqrt(FM) over all pixels
    # mn=0
    # w, h = FM.shape
    # for i in range(w):
    #     for j in range(h):
    #         mn+=math.sqrt(FM[i,j])

    if np.isnan(mn):
        return np.nanmean(FM)
    return mn

# 3. ssim: SSIM between the image and a mean-blurred copy of itself
def match_blur(imfil2):
    img2 = cv2.imread(imfil2)
    blur = cv2.blur(img2, (15, 15))   # kernel size is tunable

    img1=cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # in newer scikit-image, measure.compare_ssim became skimage.metrics.structural_similarity
    s = measure.compare_ssim(img1, img2)
    return s

# 4. sort: gray value near the dark end of the sorted pixel values
def get_gray(gray):
    w=gray.shape[0]
    h=gray.shape[1]
    total=w*h
    # collect every pixel's gray value
    data=[]
    for i in range(w):
        for j in range(h):
            data.append(gray[i,j])

    # sort ascending and take the value at roughly the darkest 0.02% position
    data.sort(reverse=False)
    index=int(total*(1/5000))
    value=data[index]
    total=np.sum(data[:100])   # sum of the 100 darkest pixels

    return value, total

def dilate(file_name,value):
    # read the image and convert to grayscale
    img=cv2.imread(file_name)
    img2=np.copy(img)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    w=gray.shape[0]
    h=gray.shape[1]

    # binarize: pixels darker than value become foreground (255)
    thrd_img=np.copy(gray)
    for i in range(w):
        for j in range(h):
            if thrd_img[i,j]<=value:
                thrd_img[i,j]=255
            else:
                thrd_img[i,j]=0

    # dilate the binary mask
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
    dilate = cv2.dilate(thrd_img, kernel)

    # dilated mask minus the original mask = the ring of newly added pixels
    result=dilate-thrd_img

    # mean gray value (in the original image) of the pixels gained by dilation
    dilate_data=[]
    for i in range(w):
        for j in range(h):
            if result[i,j]==255:
                dilate_data.append(gray[i,j])
                img2[i,j]=255
    avg=np.mean(dilate_data)

    # cv2.imshow("src",img)
    # cv2.imshow("thrd_img",thrd_img)
    # cv2.imshow("dilate",dilate)
    # cv2.imshow("result",result)
    # cv2.imshow("img2",img2)
    # cv2.waitKey(0)

    return avg,img

# 5. Fourier: low-pass filter in the frequency domain, then reconstruct the image
def fuliye(img):
    # 2-D FFT to get the frequency distribution
    f = np.fft.fft2(img)
    # by default the zero-frequency component sits at the top-left corner;
    # fftshift() moves it to the center
    fshift = np.fft.fftshift(f)
    # the FFT result is complex; its absolute value is the amplitude
    fimg = np.log(np.abs(fshift))   # log-amplitude spectrum (not used further)


    # # 1. high-pass filter: remove the low frequencies at the center, keep the surrounding high frequencies
    # area=3
    # rows, cols = img.shape
    # crow, ccol = int(rows/2), int(cols/2)
    # fshift[crow-area:crow+area, ccol-area:ccol+area] = 0


    # 2. low-pass filter: keep the low frequencies at the center, remove the surrounding high frequencies
    area=5
    rows, cols = img.shape
    crow,ccol = int(rows/2), int(cols/2)
    mask = np.zeros((rows, cols), np.uint8)
    mask[crow-area:crow+area, ccol-area:ccol+area] = 1
    fshift = fshift * mask

    # inverse FFT back to the spatial domain
    ishift = np.fft.ifftshift(fshift)
    iimg = np.fft.ifft2(ishift)
    iimg = np.abs(iimg)

    return iimg

# normalization (per feature column, label column excluded)
def normal(test_data):
    nums=test_data.shape[1]-1
    for i in range(nums):
        a=test_data[:,i]
        # test_data[:,i]=(a-np.min(a))/(np.max(a)-np.min(a))   # min-max
        test_data[:,i]=(a-np.mean(a)) / np.std(a)   # zero mean, unit variance

    return test_data

def get_feature(file_name):
    img=cv2.imread(file_name)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # 1. tenengrad1
    tenengrad=tenengrad1(gray)

    # 2. grad
    grad,_=get_grad(file_name,0)

    # 3. sort
    value,_=get_gray(gray)
    if value<40 or value>50:   # first-stage thresholds (an earlier version used <20 clear, >30 blur)
        sort=value
    else:
        avg,img=dilate(file_name,value)   # ambiguous range: second pass based on dilation
        dif=avg-value
        # second-stage threshold 15: dif below 15 means blur, above 15 means clear
        if dif>15:
            sort=40
        else:
            sort=50

    # 4. ssim
    ssim = match_blur(file_name)

    # 5. sort_fly: sort computed on the low-pass filtered image
    img_fly=fuliye(gray)
    sort_fly,_=get_gray(img_fly)

    return tenengrad, grad, sort, ssim, sort_fly



feature=[]
file_list=[]
n=0
f1=open("img/train.txt",'w+')
path = "img/1/"
for file in os.listdir(path):
    file_list.append(file)
    file_name=path+file
    mask=file.split(' ')[0]
    if mask=='blur':
        label=0
    else:
        label=1

    tenengrad, grad, sort, ssim, sort_fly = get_feature(file_name)
    feature.append([tenengrad, grad, sort, ssim, sort_fly, label])
    print(file, [tenengrad, grad, sort, ssim, sort_fly, label])

data=np.array(feature)  # convert to array
# data=normal(test_data)  # normalization
print(data.shape)

for i,line in enumerate(data):
    a=file_list[i]+'  '+str(line[0])+'  '+str(line[1])+'  '+str(line[2])+'  '+ \
    str(line[3])+'  '+str(int(line[4]))+'  '+str(int(line[5]))+"\n"
    f1.write(a)
f1.close()

3. SVM model training

from sklearn.model_selection import train_test_split
from numpy import mat
import numpy as np
import cv2

def normal(data):
    nums=data.shape[1]
    for i in range(nums):
        a=data[:,i]
        # print(np.min(a), np.max(a), np.mean(a), np.std(a))

        # data[:,i]=(a-np.min(a))/(np.max(a)-np.min(a))   # min-max
        data[:,i]=(a-np.mean(a)) / np.std(a)   # zero mean, unit variance   mean=[11.37,45.67,0.78]  std=[3.59,21.96,0.09]
    return data

def normal2(data):
    # fixed normalization statistics, in feature order: tenengrad, grad, sort, ssim, sort_fly
    min_list=[82.24, 3.85, 8, 0.51, 11]
    max_list=[3624.92, 21.18, 102, 0.98, 103]
    mean_list=[961.48, 11.25, 45.6, 0.79, 53.59]
    std_list=[619.03, 3.54, 21.5, 0.09, 20.59]

    # min_list=[8,11]
    # max_list=[102,103]
    # mean_list=[45.6,53.59]
    # std_list=[21.5,20.59]

    nums=data.shape[1]
    for i in range(nums):
        min_value=min_list[i]
        max_value=max_list[i]
        mean=mean_list[i]
        std=std_list[i]

        a=data[:,i]
        # data[:,i]=(a-min_value)/(max_value-min_value)   # min-max
        data[:,i]=(a-mean) / std   # standardize with the fixed training statistics

    # data=np.clip(data,0,1)
    return data

def loadDataSet(fileName):
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        for line in fr.readlines():
            line = line.strip().split('  ')

            # tenengrad, grad, sort, ssim, sort_fly
            dataMat.append([float(line[1]), float(line[2]), float(line[3]), float(line[4]), float(line[5])])
            # dataMat.append([float(line[3]),float(line[5])])

            labelMat.append([int(line[6])])
    return dataMat, labelMat


model_path='data/train/txt2/model1/'
# load the training set
train_data,train_label = loadDataSet("data/train/txt2/train-shuffle.txt")
train_data = mat(train_data)
train_data=np.array(train_data, dtype='float32')
train_data=normal2(train_data)   # normalization
train_label = np.array(train_label, dtype='int32')   # OpenCV's SVM expects integer (CV_32S) responses
print(train_data.shape, train_label.shape)

# load the test set
test_data,test_label = loadDataSet("data/train/txt2/test-shuffle.txt")   # 1. load a txt dataset
test_data = mat(test_data)
test_data=np.array(test_data, dtype='float32')
test_data=normal2(test_data)   # normalization
test_label=mat(test_label)
print(test_data.shape, test_label.shape)


acc=[]
for i in range(1,6):
    train_features, val_features, train_labels, val_labels = \
    train_test_split(train_data, train_label, test_size=0.2, random_state=i+1)
    # print(train_labels.shape, np.sum(train_labels), val_labels.shape, np.sum(val_labels))
    # print(type(train_labels))

    # create the SVM classifier
    svm = cv2.ml.SVM_create()
    svm.setType(cv2.ml.SVM_C_SVC)  # SVM type
    svm.setKernel(cv2.ml.SVM_LINEAR) # linear kernel
    svm.setC(1e-5)

    # train (trainAuto cross-validates C over a grid)
    # ret = svm.train(train_features, cv2.ml.ROW_SAMPLE, train_labels)
    ret = svm.trainAuto(train_features, cv2.ml.ROW_SAMPLE, train_labels, kFold=5, Cgrid=svm.getDefaultGridPtr(cv2.ml.SVM_C))
    svm.save(model_path+'model_'+str(i) +'.xml')
    vec = svm.getSupportVectors()
    print("support vectors:", vec)


    # test
    (ret, res) = svm.predict(test_data)
    # print(res)

    # accuracy
    n=0
    lens=len(test_data)
    for index in range(lens):
        if res[index]==test_label[index]:
            n=n+1
    Accuracy=n/lens
    acc.append(Accuracy)
    print("Run {}: accuracy = {}".format(i, Accuracy))
print("Mean accuracy:", np.mean(acc))

4. Real-time testing (you can swap the image-reading loop for a video stream yourself; see the sketch after this section's code)

import os
import cv2
import numpy as np
from PIL import Image
from skimage import measure


def get_mask(file_name, res):
    mask=0
    label=file_name.split(' ')[0]
    pred=int(res[0][0])

    if label=="clear" and pred!=1:
        mask=1
        # shutil.copy(file_name,error_name)

    if label=="sharp" and pred!=1:
        mask=1
        # shutil.copy(file_name,error_name)

    if label=="blur" and pred!=0:
        mask=1
        # shutil.copy(file_name,error_name)

    if mask==1:
        print(file_name, pred)

    return mask

# 1. grad: mean and variance of the Sobel-x gradient, ignoring dark background and saturated edges
def del_back(gray,sobelx):
    w=gray.shape[0]
    h=gray.shape[1]

    data=[]
    for i in range(w):
        for j in range(h):
            # keep the gradient only where the pixel is not background (>30)
            # and the gradient is not saturated (<250)
            if gray[i,j]>30 and sobelx[i,j]<250:
                data.append(sobelx[i,j])

    return data

def get_grad(file_name,angel):
    # read the image as grayscale
    gray = Image.open(file_name).convert('L')

    # rotate the image (angel is the rotation angle in degrees)
    # angle=[30,60,90,120,150,180]
    gray = gray.rotate(angel)
    gray=np.array(gray)

    # horizontal Sobel gradient
    sobelx=cv2.Sobel(gray,cv2.CV_64F,dx=1,dy=0)
    sobelx=cv2.convertScaleAbs(sobelx)

    data=del_back(gray,sobelx)
    grad1=np.mean(data)
    grad2=np.var(data)

    # cv2.imshow("img2",gray)
    # cv2.waitKey(0)

    return grad1,grad2

# 2. tenengrad: mean of the squared Sobel gradient magnitude
def tenengrad1(img, ksize=3):
    Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    FM = Gx*Gx + Gy*Gy
    mn = cv2.mean(FM)[0]

    # # variant 2: sum of sqrt(FM) over all pixels
    # mn=0
    # w, h = FM.shape
    # for i in range(w):
    #     for j in range(h):
    #         mn+=math.sqrt(FM[i,j])

    if np.isnan(mn):
        return np.nanmean(FM)
    return mn

# 3. ssim: SSIM between the image and a mean-blurred copy of itself
def match_blur(imfil2):
    img2 = cv2.imread(imfil2)
    blur = cv2.blur(img2, (15, 15))   # kernel size is tunable

    img1=cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # in newer scikit-image, measure.compare_ssim became skimage.metrics.structural_similarity
    s = measure.compare_ssim(img1, img2)
    return s

# 4. sort: gray value near the dark end of the sorted pixel values
def get_gray(gray):
    w=gray.shape[0]
    h=gray.shape[1]
    total=w*h
    # collect every pixel's gray value
    data=[]
    for i in range(w):
        for j in range(h):
            data.append(gray[i,j])

    # sort ascending and take the value at roughly the darkest 0.02% position
    data.sort(reverse=False)
    index=int(total*(1/5000))
    value=data[index]
    total=np.sum(data[:100])   # sum of the 100 darkest pixels

    return value, total

def dilate(file_name,value):
    # read the image and convert to grayscale
    img=cv2.imread(file_name)
    img2=np.copy(img)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    w=gray.shape[0]
    h=gray.shape[1]

    # binarize: pixels darker than value become foreground (255)
    thrd_img=np.copy(gray)
    for i in range(w):
        for j in range(h):
            if thrd_img[i,j]<=value:
                thrd_img[i,j]=255
            else:
                thrd_img[i,j]=0

    # dilate the binary mask
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
    dilate = cv2.dilate(thrd_img, kernel)

    # dilated mask minus the original mask = the ring of newly added pixels
    result=dilate-thrd_img

    # mean gray value (in the original image) of the pixels gained by dilation
    dilate_data=[]
    for i in range(w):
        for j in range(h):
            if result[i,j]==255:
                dilate_data.append(gray[i,j])
                img2[i,j]=255
    avg=np.mean(dilate_data)

    # cv2.imshow("src",img)
    # cv2.imshow("thrd_img",thrd_img)
    # cv2.imshow("dilate",dilate)
    # cv2.imshow("result",result)
    # cv2.imshow("img2",img2)
    # cv2.waitKey(0)

    return avg,img

def get_feature(file_name):
    img=cv2.imread(file_name)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # 1. tenengrad1
    tenengrad=tenengrad1(gray)

    # 2. grad
    grad,_=get_grad(file_name,0)

    # 3. sort
    value,_=get_gray(gray)
    if value<40 or value>50:   # first-stage thresholds (an earlier version used <20 clear, >30 blur)
        sort=value
    else:
        avg,img=dilate(file_name,value)   # ambiguous range: second pass based on dilation
        dif=avg-value
        # second-stage threshold 15: dif below 15 means blur, above 15 means clear
        if dif>15:
            sort=40
        else:
            sort=50

    # 4. ssim
    ssim = match_blur(file_name)

    # 5. sort_fly: sort computed on the low-pass filtered image
    img_fly=fuliye(gray)
    sort_fly,_=get_gray(img_fly)

    return tenengrad, grad, sort, ssim, sort_fly

# 5. Fourier: low-pass filter in the frequency domain, then reconstruct the image
def fuliye(img):
    # 2-D FFT to get the frequency distribution
    f = np.fft.fft2(img)
    # by default the zero-frequency component sits at the top-left corner;
    # fftshift() moves it to the center
    fshift = np.fft.fftshift(f)
    # the FFT result is complex; its absolute value is the amplitude
    fimg = np.log(np.abs(fshift))   # log-amplitude spectrum (not used further)


    # # 1. high-pass filter: remove the low frequencies at the center, keep the surrounding high frequencies
    # area=3
    # rows, cols = img.shape
    # crow, ccol = int(rows/2), int(cols/2)
    # fshift[crow-area:crow+area, ccol-area:ccol+area] = 0


    # 2. low-pass filter: keep the low frequencies at the center, remove the surrounding high frequencies
    area=5
    rows, cols = img.shape
    crow,ccol = int(rows/2), int(cols/2)
    mask = np.zeros((rows, cols), np.uint8)
    mask[crow-area:crow+area, ccol-area:ccol+area] = 1
    fshift = fshift * mask

    # inverse FFT back to the spatial domain
    ishift = np.fft.ifftshift(fshift)
    iimg = np.fft.ifft2(ishift)
    iimg = np.abs(iimg)

    return iimg

def normal2(data):
    # fixed normalization statistics, in feature order: tenengrad, grad, sort, ssim, sort_fly
    min_list=[82.24, 3.85, 8, 0.51, 11]
    max_list=[3624.92, 21.18, 102, 0.98, 103]
    mean_list=[961.48, 11.25, 45.6, 0.79, 53.59]
    std_list=[619.03, 3.54, 21.5, 0.09, 20.59]

    # min_list=[8,11]
    # max_list=[102,103]
    # mean_list=[45.6,53.59]
    # std_list=[21.5,20.59]

    nums=data.shape[1]
    for i in range(nums):
        min_value=min_list[i]
        max_value=max_list[i]
        mean=mean_list[i]
        std=std_list[i]

        a=data[:,i]
        # data[:,i]=(a-min_value)/(max_value-min_value)   # min-max
        data[:,i]=(a-mean) / std   # standardize with the fixed training statistics

    # data=np.clip(data,0,1)
    return data


# create the classifier and load the trained model
svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)  # SVM type
svm.setKernel(cv2.ml.SVM_LINEAR) # linear kernel
svm.setC(1e-5)
svm = cv2.ml.SVM_load("data/train/txt2/model1/model_5.xml")   # loading replaces the parameters set above

acc=[]
n=0
path = "data/data1/eyebrow/"
for file in os.listdir(path):
    file_name=path+file
    
    tenengrad, grad, sort, ssim, sort_fly = get_feature(file_name)
    
    line=[tenengrad, grad, sort, ssim, sort_fly]
    line=np.reshape(np.array(line, dtype='float32'),(-1,len(line)))
    line=normal2(line)

    # predict
    (ret, res) = svm.predict(line)
    # print(file, res[0])

    mask=get_mask(file,res)
    acc.append(mask)
print("acc:", 1-np.mean(acc))

Note: other features that were tried

import cv2
import numpy as np
import math
import time
import os
from skimage import filters
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt

# 1. Brenner gradient function
def brenner(img):
    shape = np.shape(img)
    out = 0
    for x in range(0, shape[0]-2):
        for y in range(0, shape[1]):
            out+=(int(img[x+2,y])-int(img[x,y]))**2
    return out

# 2. SMD2 (product of gray-level differences)
def SMD2(img):
    shape = np.shape(img)
    out = 0
    for x in range(0, shape[0]-1):
        for y in range(0, shape[1]-1):
            out+=math.fabs(int(img[x,y]) - int(img[x+1,y])) * math.fabs(int(img[x,y]) - int(img[x,y+1]))
    return out

# 3. tenengrad: mean of the squared Sobel gradient magnitude
def tenengrad1(img, ksize=3):
    Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    FM = Gx*Gx + Gy*Gy
    mn = cv2.mean(FM)[0]

    # # variant 2: sum of sqrt(FM) over all pixels
    # mn=0
    # w, h = FM.shape
    # for i in range(w):
    #     for j in range(h):
    #         mn+=math.sqrt(FM[i,j])

    if np.isnan(mn):
        return np.nanmean(FM)
    return mn

def tenengrad2(img):
    f = np.matrix(img)

    tmp = filters.sobel(f)
    source=np.sum(tmp**2)
    source=np.sqrt(source)

    return source

# 5. Kurtosis of the gray-level distribution
def kurtosis(img):
    shape = np.shape(img)
    N=shape[0]*shape[1]   # number of pixels

    data=[]
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            data.append(int(img[x,y]))

    avg=sum(data)/N
    s4=np.std(data)**4   # standard deviation to the 4th power

    data2=[]
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            data2.append(abs((img[x,y]-avg))**4)

    out1=sum(data2)/N
    out=out1/s4-3   # manual excess kurtosis (kept for reference, not returned)

    kurtosis = stats.kurtosis(data)   # scipy's excess kurtosis is what is actually used
    # print(kurtosis)

    return kurtosis

def frequency(img):
    # FFT and shift the zero frequency to the center
    f = np.fft.fft2(img)
    fshift = np.fft.fftshift(f)
    result = 20*np.log(np.abs(fshift))
    # print(result.shape)


    num=15   # number of concentric rings to sample
    x0=int(result.shape[0]/2)   # center x
    y0=int(result.shape[1]/2)   # center y
    r0=int(min(x0,y0)/num)   # radius increment per ring


    x=[]
    y=[]
    for i in range(1,num):
        r=r0*i   # ring radius
        data=[]
        # sample a point every 36 degrees on the ring
        for angle in range(0,360,36):
            x1 = int(x0 + r * np.cos(angle * np.pi / 180))
            y1 = int(y0 + r * np.sin(angle * np.pi / 180))
            data.append(result[x1,y1])   # spectrum magnitude at the sampled point

        avg=int(np.mean(data))    # mean magnitude over the ring
        x.append(r)
        y.append(avg)

    # fit a line to (radius, mean magnitude); the slope describes how fast the
    # spectrum falls off towards high frequencies
    # (pd.ols has been removed from pandas; np.polyfit returns the same slope coefficient)
    slope, intercept = np.polyfit(np.array(x), np.array(y), 1)


    # # plotting for inspection
    # fig = plt.figure(figsize=(8,6))
    # ax = fig.add_subplot()
    #
    # plt.plot(x,y,'o-',label=u"curve")
    # plt.show()

    return slope


# 5 metrics: SMD2 / Brenner / Tenengrad / kurtosis / frequency spectrum
f1=open('E:/data/ng/data/gg-brow.txt',"w+")
path="E:/data/ng/data/eyebrow/"
for file in os.listdir(path):
    start=time.time()

    img=cv2.imread(path+file)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # print(img.shape,gray.shape)

    fm1=brenner(gray)
    fm2=SMD2(gray)
    fm3=tenengrad1(gray)
    fm4=tenengrad2(gray)
    fm5=kurtosis(gray)
    # fm6=frequency(gray)
    # print(file, "brenner:",fm1,"smd2:",fm2,"tenengrad1:",fm3,"tenengrad2:",fm4,"kurtosis:",fm5,"frequency:",fm6)

    end=time.time()-start
    print(file, fm3)

    # f1.write(file+'  '+str(fm3)+'  \n')
    f1.write(file+'  '+str(fm1)+'  '+str(fm2)+'  '+str(fm3)+'  '+str(fm4)+'  '+str(fm5)+'  \n')
f1.close()
