Python Fingerprint Recognition

This is a reimplementation of the principles of classical fingerprint recognition. Every processing step is visualized so that the whole pipeline is easy to follow. The parameters have not been optimized; if you want to use this code in practice, tune the parameters in it to reasonable values.

#!/usr/bin/python
# -*- coding: UTF-8 -*-
import cv2
import sys
import scipy as sp
import scipy.spatial  # required so that sp.spatial.distance is available below
import math
import copy
import numpy as np
from enhance import image_enhance  # see below
from core import get_core  # see below
from skimage.morphology import skeletonize


def plt_show(image, name, draw=None, core=None):
    image = image.astype(np.uint8)
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, 288, 384)
    cimg = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    if draw:
        for point in draw.keys():
            if draw[point][0] == 1:
                cv2.circle(cimg, (point[1], point[0]), 1, (0, 0, 255), thickness=-1)
            elif draw[point][0] == 2:
                cv2.circle(cimg, (point[1], point[0]), 1, (255, 0, 0), thickness=-1)
            else:
                pass
    if core is not None:
        cv2.circle(cimg, (core[1], core[0]), 1, (0, 255, 0), thickness=-1)
    cv2.imshow(name, cimg)
    cv2.waitKey(0)

class FingerProcess(object):
    """
    图像质量判断
    纹线平滑
    指纹区域检测
    指纹方向图和频率估算
    图像二值化(将指纹图像中各像素点的灰度值设置为0或255)
    图像细化
    """
    def __init__(self, is_showing):
        self.height = 0
        self.width = 0
        self.is_showing = is_showing

    def enhanced(self, image_path):

        img = cv2.imread(image_path)
        if len(img.shape) > 2:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if np.shape(img) != (384, 288):
            img = cv2.resize(img, (288, 384))

        enhanced_img = image_enhance(img).astype(int)  # np.int was removed in NumPy 1.24

        if self.is_showing == True:
            plt_show(image=enhanced_img * 255, name='image_enhanced')
        return enhanced_img

    def normalization(self, image):
        """
        Gray value is limited to a certain range
        :param image:
        :return:
        """
        (self.height, self.width) = np.shape(image)
        mean = np.mean(image)
        variance = np.std(image)
        for i in range(self.height):
            for j in range(self.width):
                pixel = image[i, j]
                change = np.sqrt(1000. * (pixel - mean) / variance) if pixel > mean else\
                    -np.sqrt(1000. * (mean - pixel) / variance)
                image[i, j] = max(min(150. + change, 255), 0)
        if self.is_showing == True:
            plt_show(image=image, name='image_norm')
        return image

    def segmentation(self, image):
        """
        Distinguish between foreground and background colors
        :param image:
        :return:
        """
        region = 3
        h_region = int(np.floor(self.height / region))
        w_region = int(np.floor(self.width / region))
        # each region average and variance
        ave_region = np.zeros((h_region, w_region))
        var_region = np.zeros((h_region, w_region))
        for i in range(h_region):
            for j in range(w_region):
                image_region = image[region*i:region*(i+1), region*j:region*(j+1)]
                ave_region[i, j] = np.mean(image_region)
                var_region[i, j] = np.var(image_region)
        # total average and variance
        ave_region_total = np.mean(ave_region)
        var_region_total = np.mean(var_region)
        # foreground average and variance
        fg_ave = np.mean(ave_region[ave_region < ave_region_total])
        fg_var = np.mean(var_region[var_region > var_region_total])
        # background average and variance
        bg_ave = np.mean(ave_region[ave_region > fg_ave])
        var_region_zero = var_region[0 < var_region]
        bg_var = np.mean(var_region_zero[var_region_zero < fg_var])
        binary = np.zeros((h_region, w_region))
        # determine background region
        for i in range(h_region):
            for j in range(w_region):
                if var_region[i, j] < bg_var:
                    binary[i, j] = 1
        for i in range(1, h_region-1):
            for j in range(1, w_region-1):
                if (binary[i, j] == 1) and (np.sum(binary[i-1:i+2, j-1:j+2]) <= 5):
                    binary[i, j] = 0
                else:
                    pass
        # replace background by foreground average
        image_mask = np.zeros((self.height, self.width))
        for i in range(self.height):
            for j in range(self.width):
                if binary[int(i/3), int(j/3)] == 1:
                    image[i, j] = fg_ave
                    image_mask[i, j] = 1
                else:
                    pass
        # black and white flip
        image_mask = abs(image_mask-1)
        if self.is_showing == True:
            plt_show(image=image_mask*255, name='image_mask')
            plt_show(image=image, name='image_sega')
        return image, image_mask

    def enhance_grain(self, image, image_mask):
        """
        Enhance fingerprint lines along the ridge line
        :param image:
        :return:
        """
        # mean filter denoising
        image = np.array(image, dtype=np.float32)
        kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], np.float32) / 9
        image_conv = cv2.filter2D(image, -1, kernel)
        if self.is_showing == True:
            plt_show(image=image_conv, name='image_conv')
        # smoothed image matrix
        image_direction = np.zeros((self.height, self.width))
        for i in range(4, self.height-4):
            for j in range(4, self.width-4):
                sum_1 = image_conv[i, j-4] + image_conv[i, j-2] + image_conv[i, j+2] + image_conv[i, j+4]
                sum_2 = image_conv[i-2, j+4] + image_conv[i-1, j+2] + image_conv[i+1, j-2] + image_conv[
                    i+2, j-4]
                sum_3 = image_conv[i-2, j+2] + image_conv[i-4, j+4] + image_conv[i+2, j-2] + image_conv[
                    i+4, j-4]
                sum_4 = image_conv[i-2, j+1] + image_conv[i-4, j+2] + image_conv[i+2, j-1] + image_conv[
                    i+4, j-2]
                sum_5 = image_conv[i-2, j] + image_conv[i-4, j] + image_conv[i+2, j] + image_conv[i+4, j]
                sum_6 = image_conv[i-4, j-2] + image_conv[i-2, j-1] + image_conv[i+2, j+1] + image_conv[
                    i+4, j+2]
                sum_7 = image_conv[i-4, j-4] + image_conv[i-2, j-2] + image_conv[i+2, j+2] + image_conv[
                    i+4, j+4]
                sum_8 = image_conv[i-2, j-4] + image_conv[i-1, j-2] + image_conv[i+1, j+2] + image_conv[
                    i+2, j+4]
                sum_direction = np.array([sum_1, sum_2, sum_3, sum_4, sum_5, sum_6, sum_7, sum_8], np.float32)
                sum_max = np.max(sum_direction)
                sum_min = np.min(sum_direction)
                sum_ave = np.mean(sum_direction)
                sum_pixel = sum_min if(sum_max+sum_min+4*image_conv[i][j]) > (3*sum_ave) else sum_max
                image_direction[i][j] = 128 if sum_pixel > sum_ave else 255
        if self.is_showing == True:
            plt_show(image=image_direction, name='image_direction')

        image_enhanced = np.multiply(image_direction, image_mask)
        if self.is_showing == True:
            plt_show(image=image_enhanced, name='image_grain')
        image_enhanced_binary = image_enhanced != 128
        if self.is_showing == True:
            plt_show(image=image_enhanced_binary*255, name='image_grain_binary')
        return image_enhanced_binary

    def remove_voids(self, image):
        """
        Remove voids from fingerprints.
        the 4 neighborhood points of the black point are burrs if three or more are white points.
        :return:
        """
        for i in range(1, self.height-1):
            for j in range(1, self.width-1):
                if image[i, j] == 0 and (image[i, j-1] + image[i, j+1] + image[i-1, j] + image[i+1, j]) >= 3:
                    image[i, j] = 1
                else:
                    pass
        if self.is_showing == True:
            plt_show(image=image * 255, name='image_remove_voids')
        return image

    def remove_burrs(self, image):
        """
        Remove burrs from fingerprints.
        the point is white (background) and the circumference is black (foreground).
        The sum of the eight fields is two, and the hole is 0.
        :param image:
        :return:
        """
        image = image.astype(int)
        for i in range(1, self.height-1):
            for j in range(1, self.width-1):
                if image[i, j] == 1:
                    # clockwise expand of 8 neighborhood points
                    pixel_up = [image[i - 1, j]]
                    pixel_right = image[i - 1:i + 2, j + 1]
                    pixel_down = [image[i + 1, j]]
                    pixel_left = list(reversed(image[i - 1:i + 2, j - 1]))
                    # subtractively
                    neighbor_expansion = np.concatenate((pixel_up, pixel_right, pixel_down, pixel_left), axis=0)
                    neighbor_expansion_copy = np.concatenate((pixel_right, pixel_down, pixel_left, pixel_up), axis=0)
                    neighbor_sum = sum(list(map(lambda x: abs(x), neighbor_expansion-neighbor_expansion_copy)))
                    neighbor_diagonal = sum(neighbor_expansion[:3])*sum(neighbor_expansion[5:8]) \
                                        + sum(neighbor_expansion_copy[1:4])*sum(neighbor_expansion_copy[5:8])
                    if neighbor_sum != 1 and neighbor_diagonal == 0:
                        image[i, j] = 0
                    else:
                        pass
                else:
                    pass
        if self.is_showing == True:
            plt_show(image=image * 255, name='image_remove_burrs')
        return image

    def thinning(self, image):
        """
        Image thinning
        :param image:
        :return:
        """
        image = (image * 255).astype(np.uint8)
        mask = cv2.bitwise_not(image) / 255
        # implementing a skeleton algorithm
        skeleton = skeletonize(mask)
        skeleton = (skeleton * 255).astype(np.uint8)
        image_thin = cv2.bitwise_not(skeleton) / 255
        if self.is_showing == True:
            plt_show(image=image_thin*255, name='image_thin')

        return image_thin

class GetFeaturePoint(object):

    def __init__(self, image, is_showing):
        self.image = image
        (self.height, self.width) = np.shape(image)
        self.x_values = 0
        self.y_values = 0
        self.is_showing = is_showing
        self.feature_cn_sn = {}

    def line_angle(self, point, v1, v2):
        (dx1, dy1) = (v1[0] - point[0], v1[1] - point[1])
        (dx2, dy2) = (v2[0] - point[0], v2[1] - point[1])
        angle1 = math.atan2(dy1, dx1)
        angle1 = int(angle1 * 180 / math.pi)
        # print(angle1)
        angle2 = math.atan2(dy2, dx2)
        angle2 = int(angle2 * 180 / math.pi)
        # print(angle2)
        if angle1 * angle2 >= 0:
            included_angle = abs(angle1 - angle2)
        else:
            included_angle = abs(angle1) + abs(angle2)
            if included_angle > 180:
                included_angle = 360 - included_angle
        return included_angle
    def edge_walk(self, init_point, walk_step, feature_points, image, check):
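        """
        Walk up to walk_step pixels along a ridge starting from init_point,
        never stepping back onto the previous pixel. Returns early with the
        feature point if one from feature_points is reached, and with the
        current point if the ridge ends or leaves the image; with
        walk_step == 1 it simply returns all black neighbours.
        """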
        if check == True:
            if init_point in feature_points:
                return [init_point]
        height, width = np.shape(image)
        init_point = [init_point]
        direction = [(-1, 1), (-1, 0), (-1, -1), (0, 1), (0, -1), (1, 1), (1, 0), (1, -1)]
        (x_walk, y_walk, last_step) = (0, 0, (0, 0))
        if walk_step == 1:
            walk_points = []
            for step in direction:
                x_walk = init_point[0][0] + step[0]
                y_walk = init_point[0][1] + step[1]
                if x_walk >= height or x_walk < 0 or y_walk >= width or y_walk < 0:
                    pass
                elif image[x_walk, y_walk] == 0:
                    walk_points.append((x_walk, y_walk))
                else:
                    pass
            return walk_points
        else:
            for i in range(walk_step):
                walk_points = []
                walk_direction = copy.deepcopy(direction)
                if i != 0:
                    # do not step straight back onto the pixel we came from
                    walk_direction.remove((-last_step[0], -last_step[1]))
                for step in walk_direction:
                    if init_point == []:
                        return []
                    else:
                        x_walk = init_point[0][0] + step[0]
                        y_walk = init_point[0][1] + step[1]
                    if x_walk >= height or x_walk < 0 or y_walk >= width or y_walk < 0:
                        return [init_point[0]]
                    elif image[x_walk, y_walk] == 0:
                        if (x_walk, y_walk) in feature_points:
                            terminal_point = [(x_walk, y_walk)]
                            return terminal_point
                        else:
                            walk_points.append((x_walk, y_walk))
                            last_step = step
                    else:
                        return [init_point[0]]
                init_point = walk_points
        terminal_point = init_point
        return terminal_point

    def cn_and_sn(self):
        """
        cn: crossing number
        sn: sum of 8 neighborhood points
        Find all endpoints and intersections of the thinned image.
        :param image:
        :return:
        """
        for i in range(1, self.height-1):
            for j in range(1, self.width-1):
                if self.image[i, j] == 0:
                    # clockwise expand of 8 neighborhood points
                    pixel_up = [self.image[i-1, j]]
                    pixel_right = self.image[i-1:i+2, j+1]
                    pixel_down = [self.image[i+1, j]]
                    pixel_left = list(reversed(self.image[i-1:i+2, j-1]))
                    # subtractively
                    neighbor_expansion = np.concatenate((pixel_up, pixel_right, pixel_down, pixel_left), axis=0)
                    neighbor_expansion_copy = np.concatenate((pixel_right, pixel_down, pixel_left, pixel_up), axis=0)
                    cn = 1/2 * sum(list(map(lambda x: abs(x), neighbor_expansion-neighbor_expansion_copy)))
                    sn = np.sum(self.image[i-1:i+2, j-1:j+2])-self.image[i, j]
                    if cn == 1 and sn == 7:
                        self.feature_cn_sn[(i, j)] = [1]
                    elif cn == 3 and sn == 5:
                        self.feature_cn_sn[(i, j)] = [2]
        if self.is_showing == True:
            plt_show(image=self.image*255, name='features', draw=self.feature_cn_sn)
    def feature_point_angle(self):
        """

        :param feature_cn_sn:
        :return:
        """
        feature_points = list(self.feature_cn_sn.keys())
        for point in feature_points:
            rm_feature_points = copy.deepcopy(feature_points)
            rm_feature_points.remove(point)
            # endpoint angle
            if self.feature_cn_sn[point][0] == 1:
                (terminal_x, terminal_y) = self.edge_walk(init_point=point, walk_step=7, feature_points=rm_feature_points, image=self.image, check=False)[0]
                angle = math.atan2(terminal_y-point[1], terminal_x-point[0])
                angle = int(angle * 180 / math.pi)
                self.feature_cn_sn[point].append(angle)
            # intersection angle
            elif self.feature_cn_sn[point][0] == 2:
                point_diagonal = {}
                step_1_points = self.edge_walk(init_point=point, walk_step=1, feature_points=rm_feature_points, image=self.image, check=False)
                #print(step_1_points)
                point1 = step_1_points[0]
                point2 = step_1_points[1]
                point3 = step_1_points[2]
                point_diagonal[point3] = self.line_angle(point=point, v1=point1, v2=point2)
                point_diagonal[point2] = self.line_angle(point=point, v1=point1, v2=point3)
                point_diagonal[point1] = self.line_angle(point=point, v1=point2, v2=point3)
                dicSortList = sorted(point_diagonal.items(), key=lambda x: x[1])
                image_rm_point = copy.deepcopy(self.image)
                image_rm_point[point[0], point[1]] = 1
                image_rm_point[dicSortList[1][0][0], dicSortList[1][0][1]] = 1
                image_rm_point[dicSortList[2][0][0], dicSortList[2][0][1]] = 1
                (terminal_x, terminal_y) = self.edge_walk(init_point=dicSortList[0][0], walk_step=6, feature_points=feature_points, image=image_rm_point, check=True)[0]
                angle = math.atan2(terminal_y-point[1], terminal_x-point[0])
                angle = int(angle * 180 / math.pi)
                self.feature_cn_sn[point].append(angle)


    def rm_burr_and_short_ridges(self):
        """
        The line is smoothed. Find each end point so that it moves 5 pixels along the direction of the line.
         f the intersection is encountered within 5 pixels, the endpoint is considered as a burr, and removed.
        :param feature_points:
        :return:
        """
        endpoints = [k for k, v in self.feature_cn_sn.items() if v[0] == 1]
        print(endpoints)
        feature_points = self.feature_cn_sn.keys()
        rm_points = []
        for point in endpoints:
            return_point = self.edge_walk(init_point=point, walk_step=7, feature_points=feature_points, image=self.image, check=False)[0]
            if return_point in feature_points:
                rm_points.append(point)
                rm_points.append(return_point)
            else:
                pass
        rm_points = list(set(rm_points))
        for point in rm_points:
            self.feature_cn_sn.pop(point)
        print(sys._getframe().f_code.co_name, len(rm_points), len(self.feature_cn_sn))
        if self.is_showing == True:
            plt_show(image=self.image*255, name='rm_burr_and_short_ridges', draw=self.feature_cn_sn)
    def rm_boundary(self, image_mask):
        """
        Remove the endpoint of the edge of the image
        :param feature_points:
        :return:
        """
        bound_dist = 5
        rm_points = []
        for point in self.feature_cn_sn.keys():
            (x, y) = point
            if image_mask[min(x+bound_dist, self.height-1), y] == 0 \
                    or image_mask[max(x-bound_dist, 0), y] == 0 \
                    or image_mask[x, min(y+bound_dist, self.width-1)] == 0 \
                    or image_mask[x, max(y-bound_dist, 0)] == 0:
                rm_points.append(point)
            else:
                pass
        for point in rm_points:
            self.feature_cn_sn.pop(point)
        print(sys._getframe().f_code.co_name, len(rm_points), len(self.feature_cn_sn))
        if self.is_showing == True:
            plt_show(image=self.image*255, name='rm_boundary', draw=self.feature_cn_sn)
    def rm_broken_ridge(self):
        """
        There are no intersections or endpoints within 10 distances
        :param feature_points:
        :return:
        """
        endpoints = [k for k, v in self.feature_cn_sn.items() if v[0] == 1]
        endpoints_num = len(endpoints)
        rm_points = []
        for i in range(endpoints_num-1):
            for j in range(i+1, endpoints_num):
                distance = sp.spatial.distance.euclidean(endpoints[i], endpoints[j])

                if distance <= 10:
                    a_i = self.feature_cn_sn[endpoints[i]][1]
                    a_j = self.feature_cn_sn[endpoints[j]][1]
                    angle = math.atan2(endpoints[j][1]-endpoints[i][1], endpoints[j][0]-endpoints[i][0])
                    a_i_j = int(angle * 180 / math.pi)
                    if (abs(a_i_j - a_i) < 30) or (abs(a_i_j - a_j) < 30):
                        rm_points.append(endpoints[i])
                        rm_points.append(endpoints[j])
                    else:
                        pass
                else:
                    pass
        rm_points = list(set(rm_points))
        for point in rm_points:
            self.feature_cn_sn.pop(point)
        print(sys._getframe().f_code.co_name, len(rm_points), len(self.feature_cn_sn))
        if self.is_showing == True:
            plt_show(image=self.image*255, name='rm_broken_ridge', draw=self.feature_cn_sn)
    def rm_island_and_fakeBridge(self):
        intersections = [k for k, v in self.feature_cn_sn.items() if v[0] == 2]
        intersections_near = {}
        rm_points = []
        # Get neighbor intersections for every intersection
        for point in intersections:
            intersections_near[point] = []
            points = self.edge_walk(init_point=point, walk_step=1, feature_points=intersections, image=self.image, check=False)
            image_rm_point = copy.deepcopy(self.image)
            image_rm_point[point[0], point[1]] = 1
            for eachs in points:
                image_rm_point[eachs[0], eachs[1]] = 1
            for point_1_step in points:
                return_point = self.edge_walk(init_point=point_1_step, walk_step=7, feature_points=intersections, image=image_rm_point, check=True)
                if return_point == []:
                    rm_points.append(point)
                else:
                    return_point = return_point[0]
                    if return_point in intersections:
                        intersections_near[point].append(return_point)
                    else:
                        pass

        intersections_near = {k: v for k, v in intersections_near.items() if len(v) != 0}

        intersections_near_num = len(intersections_near)
        for i in range(intersections_near_num-1):
            for j in range(i+1, intersections_near_num):
                values = list(intersections_near.values())
                keys = list(intersections_near.keys())
                intersection_same = [x for x in values[i] if x in values[j]]
                # Remove island
                if len(intersection_same) != 0:
                    print('%s||%s??%s||%s' % (str(keys[i]), str(values[j]),str(keys[j]), str(values[i])))
                    rm_points.append(keys[i])
                    rm_points.append(keys[j])
                    rm_points.append(intersection_same[0])
                # Remove fake bridge
                elif (keys[j] in values[i]) or (keys[i] in values[j]):
                    print('%s||%s=%s||%s' % (str(keys[i]), str(values[j]),str(keys[j]), str(values[i])))
                    a_i = self.feature_cn_sn[keys[i]][1]
                    a_j = self.feature_cn_sn[keys[j]][1]
                    angle = math.atan2(keys[j][1] - keys[i][1],keys[j][0] - keys[i][0])
                    a_i_j = int(angle * 180 / math.pi)
                    if (70 < abs(a_i_j - a_i) < 110) or (70 < abs(a_i_j - a_j) < 110):
                        rm_points.append(keys[i])
                        rm_points.append(keys[j])
                    else:
                        pass
                else:
                    pass
        rm_points = list(set(rm_points))
        for point in rm_points:
            self.feature_cn_sn.pop(point)
        print(sys._getframe().f_code.co_name, len(rm_points), len(self.feature_cn_sn))
        if self.is_showing == True:
            plt_show(image=self.image*255, name='rm_island_and_fakeBridge', draw=self.feature_cn_sn)
        return self.feature_cn_sn
def run(image_path, is_showing=True):

    op1 = FingerProcess(is_showing=is_showing)
    image_enhanced = op1.enhanced(image_path=image_path)
    image_norm = op1.normalization(image=image_enhanced)
    image_sega, image_mask = op1.segmentation(image=image_norm)
    image_grain = op1.enhance_grain(image=image_sega, image_mask=image_mask)
    image_rm_void = op1.remove_voids(image=image_grain)
    image_rm_burr = op1.remove_burrs(image=image_rm_void)
    image_thin = op1.thinning(image=image_rm_burr)
    core = get_core(image=image_thin)[0]
    print(core)
    #plt_show(image=image_thin * 255, name='core', core=core)


    op2 = GetFeaturePoint(image=image_thin, is_showing=is_showing)
    op2.cn_and_sn()
    op2.feature_point_angle()
    op2.rm_burr_and_short_ridges()
    op2.rm_boundary(image_mask=image_mask)
    op2.rm_broken_ridge()
    feature_points = op2.rm_island_and_fakeBridge()
    return feature_points, core


if __name__ == "__main__":
    features, core = run(image_path='D:/PycharmProjects/37/fingerprint_identification/DB4_B/103_3.tif')

The enhance.py module used above is as follows:

import numpy as np
import math
import scipy
import cv2
import sys
from scipy import ndimage, signal


def frequest(im, orientim, windsze, minWaveLength, maxWaveLength):
    rows, cols = np.shape(im)

    # Find mean orientation within the block. This is done by averaging the
    # sines and cosines of the doubled angles before reconstructing the
    # angle again.  This avoids wraparound problems at the origin.

    cosorient = np.mean(np.cos(2 * orientim))
    sinorient = np.mean(np.sin(2 * orientim))
    orient = math.atan2(sinorient, cosorient) / 2

    # Rotate the image block so that the ridges are vertical

    # ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
    # rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
    rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0), reshape=False, order=3, mode='nearest')

    # Now crop the image so that the rotated image does not contain any
    # invalid regions.  This prevents the projection down the columns
    # from being mucked up.

    cropsze = int(np.fix(rows / np.sqrt(2)))
    offset = int(np.fix((rows - cropsze) / 2))
    rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]

    # Sum down the columns to get a projection of the grey values down
    # the ridges.

    proj = np.sum(rotim, axis=0)
    dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones(windsze))

    temp = np.abs(dilation - proj)

    peak_thresh = 2

    maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
    maxind = np.where(maxpts)

    rows_maxind, cols_maxind = np.shape(maxind)

    # Determine the spatial frequency of the ridges by dividing the
    # distance between the 1st and last peaks by the (No of peaks-1). If no
    # peaks are detected, or the wavelength is outside the allowed bounds,
    # the frequency image is set to 0

    if cols_maxind < 2:
        freqim = np.zeros(im.shape)
    else:
        NoOfPeaks = cols_maxind
        waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks - 1)
        if waveLength >= minWaveLength and waveLength <= maxWaveLength:
            freqim = 1 / np.double(waveLength) * np.ones(im.shape)
        else:
            freqim = np.zeros(im.shape)

    return (freqim)

def ridge_filter(im, orient, freq, kx, ky):
    angleInc = 3
    im = np.double(im)
    rows, cols = im.shape
    newim = np.zeros((rows, cols))

    freq_1d = np.reshape(freq, (1, rows * cols))
    ind = np.where(freq_1d > 0)

    ind = np.array(ind)
    ind = ind[1, :]

    # Round the array of frequencies to the nearest 0.01 to reduce the
    # number of distinct frequencies we have to deal with.

    non_zero_elems_in_freq = freq_1d[0][ind]
    non_zero_elems_in_freq = np.double(np.round((non_zero_elems_in_freq * 100))) / 100

    unfreq = np.unique(non_zero_elems_in_freq)

    # Generate filters corresponding to these distinct frequencies and
    # orientations in 'angleInc' increments.

    sigmax = 1 / unfreq[0] * kx
    sigmay = 1 / unfreq[0] * ky

    sze = int(np.round(3 * np.max([sigmax, sigmay])))  # half-size of the filter window

    x, y = np.meshgrid(np.linspace(-sze, sze, (2 * sze + 1)), np.linspace(-sze, sze, (2 * sze + 1)))

    reffilter = np.exp(-(((np.power(x, 2)) / (sigmax * sigmax) + (np.power(y, 2)) / (sigmay * sigmay)))) * np.cos(
        2 * np.pi * unfreq[0] * x) # this is the original gabor filter

    filt_rows, filt_cols = reffilter.shape

    gabor_filter = np.array(np.zeros((int(180 / angleInc), filt_rows, filt_cols)))

    for o in range(0, int(180 / angleInc)):
        # Generate rotated versions of the filter.  Note orientation
        # image provides orientation *along* the ridges, hence +90
        # degrees, and imrotate requires angles +ve anticlockwise, hence
        # the minus sign.

        rot_filt = scipy.ndimage.rotate(reffilter, -(o * angleInc + 90), reshape=False)
        gabor_filter[o] = rot_filt

    # Find indices of matrix points greater than maxsze from the image
    # boundary

    maxsze = int(sze)

    temp = freq > 0
    validr, validc = np.where(temp)

    temp1 = validr > maxsze
    temp2 = validr < rows - maxsze
    temp3 = validc > maxsze
    temp4 = validc < cols - maxsze

    final_temp = temp1 & temp2 & temp3 & temp4

    finalind = np.where(final_temp)

    # Convert orientation matrix values from radians to an index value
    # that corresponds to round(degrees/angleInc)

    maxorientindex = np.round(180 / angleInc)
    orientindex = np.round(orient / np.pi * 180 / angleInc)

    # do the filtering

    for i in range(0, rows):
        for j in range(0, cols):
            if orientindex[i][j] < 1:
                orientindex[i][j] = orientindex[i][j] + maxorientindex
            if orientindex[i][j] > maxorientindex:
                orientindex[i][j] = orientindex[i][j] - maxorientindex
    finalind_rows, finalind_cols = np.shape(finalind)
    sze = int(sze)
    for k in range(0, finalind_cols):
        r = validr[finalind[0][k]]
        c = validc[finalind[0][k]]

        img_block = im[r - sze:r + sze + 1][:, c - sze:c + sze + 1]

        newim[r][c] = np.sum(img_block * gabor_filter[int(orientindex[r][c]) - 1])

    return newim

def ridge_freq(im, mask, orient, blksze, windsze, minWaveLength, maxWaveLength):
    rows, cols = im.shape
    freq = np.zeros((rows, cols))

    for r in range(0, rows - blksze, blksze):
        for c in range(0, cols - blksze, blksze):
            blkim = im[r:r + blksze][:, c:c + blksze]
            blkor = orient[r:r + blksze][:, c:c + blksze]

            freq[r:r + blksze][:, c:c + blksze] = frequest(blkim, blkor, windsze, minWaveLength, maxWaveLength)

    freq = freq * mask
    freq_1d = np.reshape(freq, (1, rows * cols))
    ind = np.where(freq_1d > 0)

    ind = np.array(ind)
    ind = ind[1, :]

    non_zero_elems_in_freq = freq_1d[0][ind]

    meanfreq = np.mean(non_zero_elems_in_freq)
    medianfreq = np.median(non_zero_elems_in_freq) # does not work properly
    return freq, meanfreq


def ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):
    # Calculate image gradients.
    sze = np.fix(6 * gradientsigma)
    if np.remainder(sze, 2) == 0:
        sze = sze + 1

    gauss = cv2.getGaussianKernel(int(sze), gradientsigma)
    f = gauss * gauss.T

    fy, fx = np.gradient(f)  # Gradient of Gaussian

    # Gx = ndimage.convolve(np.double(im),fx);
    # Gy = ndimage.convolve(np.double(im),fy);

    Gx = signal.convolve2d(im, fx, mode='same')
    Gy = signal.convolve2d(im, fy, mode='same')

    Gxx = np.power(Gx, 2)
    Gyy = np.power(Gy, 2)
    Gxy = Gx * Gy

    # Now smooth the covariance data to perform a weighted summation of the data.

    sze = np.fix(6 * blocksigma)

    gauss = cv2.getGaussianKernel(int(sze), blocksigma)
    f = gauss * gauss.T

    Gxx = ndimage.convolve(Gxx, f)
    Gyy = ndimage.convolve(Gyy, f)
    Gxy = 2 * ndimage.convolve(Gxy, f)

    # Analytic solution of principal direction
    denom = np.sqrt(np.power(Gxy, 2) + np.power((Gxx - Gyy), 2)) + np.finfo(float).eps

    sin2theta = Gxy / denom # Sine and cosine of doubled angles
    cos2theta = (Gxx - Gyy) / denom

    if orientsmoothsigma:
        sze = np.fix(6 * orientsmoothsigma)
        if np.remainder(sze, 2) == 0:
            sze = sze + 1
        gauss = cv2.getGaussianKernel(int(sze), orientsmoothsigma)
        f = gauss * gauss.T
        cos2theta = ndimage.convolve(cos2theta, f)# Smoothed sine and cosine of
        sin2theta = ndimage.convolve(sin2theta, f)# doubled angles

    orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2
    return orientim


def normalise(img):
    normed = (img - np.mean(img)) / np.std(img)
    return normed


def ridge_segment(im, blksze, thresh):
    rows, cols = im.shape

    im = normalise(im)  # normalise to get zero mean and unit standard deviation

    new_rows = int(blksze * np.ceil(float(rows) / float(blksze)))
    new_cols = int(blksze * np.ceil(float(cols) / float(blksze)))

    padded_img = np.zeros((new_rows, new_cols))
    stddevim = np.zeros((new_rows, new_cols))

    padded_img[0:rows][:, 0:cols] = im

    for i in range(0, new_rows, blksze):
        for j in range(0, new_cols, blksze):
            block = padded_img[i:i + blksze][:, j:j + blksze]

            stddevim[i:i + blksze][:, j:j + blksze] = np.std(block) * np.ones(block.shape)

    stddevim = stddevim[0:rows][:, 0:cols]

    mask = stddevim > thresh

    mean_val = np.mean(im[mask])

    std_val = np.std(im[mask])

    normim = (im - mean_val) / std_val

    return normim, mask


def image_enhance(img):
    blksze = 16
    thresh = 0.1
    normim, mask = ridge_segment(img, blksze, thresh) # normalise the image and find a ROI
    enhanced_mask_ = mask.astype(int) * 255
    cv2.imwrite('./enhan/img_mask.tif', enhanced_mask_)
    gradientsigma = 1
    blocksigma = 7
    orientsmoothsigma = 7
    orientim = ridge_orient(normim, gradientsigma, blocksigma, orientsmoothsigma) # find orientation of every pixel

    blksze = 38
    windsze = 5
    minWaveLength = 5
    maxWaveLength = 15
    freq, medfreq = ridge_freq(normim, mask, orientim, blksze, windsze, minWaveLength,
                               maxWaveLength)  # find the overall frequency of ridges

    freq = medfreq * mask
    kx = 0.65
    ky = 0.65
    newim = ridge_filter(normim, orientim, freq, kx, ky)  # create gabor filter and do the actual filtering

    # th, bin_im = cv2.threshold(np.uint8(newim),0,255,cv2.THRESH_BINARY);
    return newim < -3

The core.py module used above is as follows:

import numpy as np
import cv2
import math

def get_roi(image, w_size):
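    # Assumes the 384x288 (rows x cols) image size used throughout this pipeline.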
    mean_array = []
    std_array = []

    roi = np.zeros(image.shape, dtype='uint8')

    for i in np.arange(0, 384, w_size):
        for j in np.arange(0, 288, w_size):
            submatrix = image[i:i + w_size, j:j + w_size]

            mean_array.append(np.mean(submatrix))
            std_array.append(np.std(submatrix))

    mean_array = np.asarray(mean_array)
    std_array = np.asarray(std_array)

    mean_max = np.amax(mean_array)
    std_max = np.amax(std_array)

    for i in np.arange(0, 384, w_size):
        for j in np.arange(0, 288, w_size):
            submatrix = image[i:i + w_size, j:j + w_size]

            mean = np.mean(submatrix)
            std = np.std(submatrix)

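            # Block score: darker-than-average blocks (ridge area) and blocks
            # with high variance score higher; the 1/distance term biases the
            # score towards the assumed image centre near (151, 151).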
            dist_ratio = 1 / np.linalg.norm(np.array([151, 151]) - np.array([i, j]))
            v = 0.5 * (1 - mean / mean_max) + 0.5 * std / std_max + 10 * dist_ratio

            if v > 0.3:
                roi[i:i + w_size, j:j + w_size] = 255

    im_floodfill = roi.copy()
    h, w = np.shape(im_floodfill)
    mask = np.zeros((h+2, w+2), np.uint8)
    cv2.floodFill(im_floodfill, mask, (0, 0), 255)
    im_floodfill = cv2.bitwise_not(im_floodfill)
    roi = cv2.bitwise_or(roi, im_floodfill)

    kernel = np.ones((5, 5), np.uint8)
    roi = cv2.erode(roi, kernel, iterations=1)

    return roi

def block_gradient_direction(alpha_x, alpha_y):
    orientations = np.arctan2(alpha_y, alpha_x)
    return np.divide(orientations, 2) + (math.pi / 2)

def compute_orientation(image, w):
    #image = cv2.medianBlur(image, 5)
    image = image.astype(np.float32)
    Gx = cv2.Sobel(image, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=3)
    Gy = cv2.Sobel(image, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=3)

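    # Squared-gradient method: (Gx^2 - Gy^2, 2*Gx*Gy) encodes the doubled
    # gradient angle 2*theta, so opposite gradient directions reinforce rather
    # than cancel when averaged over the w x w blocks below.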
    alpha_x = np.square(Gx) - np.square(Gy)
    alpha_y = 2 * Gx * Gy

    alpha_x = cv2.filter2D(alpha_x, -1, np.ones((w,w)), anchor=(0,0)) / (w*w)
    alpha_y = cv2.filter2D(alpha_y, -1, np.ones((w,w)), anchor=(0,0)) / (w*w)

    orientations = block_gradient_direction(alpha_x, alpha_y)

    return orientations, alpha_x, alpha_y

def diff_ang(a,b):
    diff = a - b

    if (diff >= -math.pi/2 and diff <= math.pi/2):
        return diff
    elif diff > math.pi/2:
        return diff - math.pi
    elif diff < -math.pi/2:
        return diff + math.pi

def reduce_blobs(matrix):
    blob_list = []
    matrix = np.uint8(matrix)
    h, w = np.shape(matrix)

    while np.argwhere(matrix).shape[0] > 0:
        points = np.argwhere(matrix)
        blob_list.append(points[0])
        mask = np.zeros((h+2, w+2), np.uint8)
        cv2.floodFill(matrix, mask, (points[0][1],points[0][0]), 0)

    return np.asarray(blob_list)

def block_reduce(matrix, w):
    return matrix[0:matrix.shape[0]:w, 0:matrix.shape[1]:w]

def singular_point_detection(alpha_x, alpha_y, w_size, roi):
    # block-reduce the orientation tensors by a factor of w_size (e.g. 10)
    alpha_x = block_reduce(alpha_x, w_size)
    alpha_y = block_reduce(alpha_y, w_size)

    #erode the roi a little
    kernel = np.ones((5,5),np.uint8)
    roi = cv2.erode(roi, kernel, iterations = 4)

    #smooth directions
    kernel = np.array(([1,1,1],[1,2,1],[1,1,1]))
    a = cv2.filter2D(alpha_x, -1, kernel)
    b = cv2.filter2D(alpha_y, -1, kernel)


    theta_avg = np.arctan2(b, a) * 0.5 + (math.pi / 2)

    poincare_index = np.zeros(theta_avg.shape)
    for i in np.arange(1,theta_avg.shape[0]-1):
        for j in np.arange(1,theta_avg.shape[1]-1):
            if roi[i*w_size,j*w_size]:
                poincare_index[i,j]  = diff_ang(theta_avg[i-1,j-1], theta_avg[i-1,j])
                poincare_index[i,j] += diff_ang(theta_avg[i-1,j], theta_avg[i-1,j+1])
                poincare_index[i,j] += diff_ang(theta_avg[i-1,j+1], theta_avg[i,j+1])
                poincare_index[i,j] += diff_ang(theta_avg[i,j+1], theta_avg[i+1,j+1])
                poincare_index[i,j] += diff_ang(theta_avg[i+1,j+1], theta_avg[i+1,j])
                poincare_index[i,j] += diff_ang(theta_avg[i+1,j], theta_avg[i+1,j-1])
                poincare_index[i,j] += diff_ang(theta_avg[i+1,j-1], theta_avg[i,j-1])
                poincare_index[i,j] += diff_ang(theta_avg[i,j-1], theta_avg[i-1,j-1])


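    # Poincare index: the angle differences accumulated around the closed
    # 8-neighbour loop sum to about -180 degrees at a core and +180 degrees
    # at a delta; the 45-degree margins below tolerate noise in the smoothed
    # orientation field.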
    cores = ((poincare_index < np.radians(-135)) & (poincare_index > np.radians(-225))) * 1
    cores = reduce_blobs(cores)

    deltas = ((poincare_index > np.radians(135)) & (poincare_index < np.radians(225))) * 1
    deltas = reduce_blobs(deltas)

    return deltas*w_size, cores*w_size

def get_core(image):
    roi = get_roi(image, w_size=10)
    orientations, alpha_x, alpha_y = compute_orientation(image, w=10)
    # show_orientations(enhanced, orientations, roi, label + '_orientations', w=10)

    deltas, cores = singular_point_detection(alpha_x, alpha_y, w_size=10, roi=roi)
    return cores

Python fingerprint recognition means using the Python language to build a fingerprint recognition system. Fingerprint recognition is a biometric technique that identifies or verifies a person by extracting and matching the features of a fingerprint. In Python, several open-source libraries and tools can be used for this:

1. OpenCV: a widely used computer vision library for image processing and feature extraction; with OpenCV you can preprocess fingerprint images, extract features and match them.
2. scikit-image: a Python image-processing library offering many algorithms and functions; useful for enhancing and segmenting fingerprint images and extracting features.
3. numpy: the standard scientific computing library in Python, providing efficient array operations and mathematical functions for handling fingerprint pixel data.
4. PyFingerprint: a Python library dedicated to fingerprint recognition, built around the ZFM-20 series fingerprint sensors; it provides convenient functions for capturing fingerprint images, extracting features and comparing fingerprints.

With these libraries you can implement a complete system: first capture a fingerprint image, then preprocess it (enhancement and segmentation), extract the fingerprint features and store them in a database; finally, to verify a fingerprint, compare the candidate against the stored templates to determine identity. Note that fingerprint recognition is a complex technique spanning image processing, pattern recognition and database management, so a practical application needs careful system design and algorithm tuning.
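To illustrate the comparison step described above, here is a minimal matching sketch built on the output of run(). This is not part of the original code: the function match_score, its tolerances, and the core-centred alignment (translation only, no rotation compensation) are illustrative assumptions, and it presumes every surviving minutia carries both a type and an angle entry.

import math

def match_score(features_a, core_a, features_b, core_b, dist_tol=15, angle_tol=20):
    # Hypothetical sketch: shift every minutia so the detected core is the
    # origin, then greedily pair up minutiae of the same type that are close
    # in both position and direction.
    def centred(features, core):
        return [((x - core[0], y - core[1]), v[0], v[1])
                for (x, y), v in features.items()]

    a_points = centred(features_a, core_a)
    b_points = centred(features_b, core_b)
    matched, used = 0, set()
    for pos_a, type_a, ang_a in a_points:
        for idx, (pos_b, type_b, ang_b) in enumerate(b_points):
            if idx in used or type_a != type_b:
                continue
            if (math.hypot(pos_a[0] - pos_b[0], pos_a[1] - pos_b[1]) <= dist_tol
                    and abs(ang_a - ang_b) <= angle_tol):
                matched += 1
                used.add(idx)
                break
    # Fraction of minutiae that found a partner; compare against a threshold
    # tuned on genuine/impostor pairs to decide acceptance.
    return matched / max(len(a_points), len(b_points), 1)

# Usage (hypothetical):
# features_a, core_a = run('a.tif', is_showing=False)
# features_b, core_b = run('b.tif', is_showing=False)
# print(match_score(features_a, core_a, features_b, core_b))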
