First post of the new semester ~ a bit of a mixed bag!

    It has been a while since I last wrote anything, and the new semester has started! I'm going to summarize what I did last semester as a way to kick off this one.

    1. Pattern recognition: BP-based digit recognition for seven-segment displays in scene images

   Environment: Python 3.5 and OpenCV 3.0. The main packages used are numpy, matplotlib, scikit-learn, and pickle.

   numpy handles the image matrix data, matplotlib is used for plotting, scikit-learn supplies helpers for the BP pipeline (label binarization and evaluation metrics), and pickle saves and loads the trained model.

   Scene text detection is a hard problem. Deep-learning approaches to scene text recognition generally break the task into two steps:

(1) text localization

(2) character segmentation

   On the deep-learning side, the mainstream pipeline is currently CTPN + DenseNet.

   This example simplifies the problem: recognizing the digits of a seven-segment display against a uniform background.

    First, the image is preprocessed with OpenCV's flood-fill algorithm to locate the text region.
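    As a rough sketch (not the original preprocessing code), flood-filling from a corner of the uniform background leaves the digits untouched, and their extent locates the text; the file name, seed point, and diff thresholds below are assumptions:

import cv2
import numpy as np

img = cv2.imread('scene.jpg')              # hypothetical input image
h, w = img.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)  # floodFill requires an (h+2, w+2) mask

# Fill the (assumed uniform) background starting from the top-left corner;
# the mask records every pixel the fill reached.
cv2.floodFill(img.copy(), mask, (0, 0), (255, 255, 255),
              (20, 20, 20), (20, 20, 20))

# Pixels the fill never reached are foreground (the digits); their extent
# gives the text region.
ys, xs = np.where(mask[1:-1, 1:-1] == 0)
roi = img[ys.min():ys.max() + 1, xs.min():xs.max() + 1]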

    The image is then binarized and the characters are cut apart at equal intervals.

    Each character image is then normalized, since the neural network is sensitive to the scale of the digits.
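    Continuing the sketch above, a minimal version of the binarize / split / normalize steps (Otsu thresholding and the digit count are assumptions, not the original settings):

import cv2
import numpy as np

gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)   # roi from the flood-fill sketch
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

n_digits = 4                                   # hypothetical number of digits
w = binary.shape[1] // n_digits
chars = [binary[:, k * w:(k + 1) * w] for k in range(n_digits)]

# Resize each character to 32x32 (1024 inputs) and scale to [0, 1], matching
# the normalization used in the training script below.
samples = [cv2.resize(c, (32, 32), interpolation=cv2.INTER_CUBIC)
               .reshape(1024).astype(float) / 255.0
           for c in chars]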

    The BP network uses the classic three-layer structure: an input layer, a hidden layer, and an output layer, with 1024, 2048, and 10 neurons respectively. The images are 32*32 pixels, so the input layer has 32*32 = 1024 neurons; classifying the digits 0-9 is a 10-class problem, so the output layer has 10 neurons. The hidden layer is twice the size of the input layer, chosen purely from experimental results; there is no principled rule for it.

    Two activation functions, the sigmoid and tanh, were trained and compared.

    With the sigmoid, a learning rate l = 0.1 and epochs = 10000 gave very good results, with accuracy close to 100%, and it also performed well in actual use.

    With tanh, a learning rate l = 0.001 and epochs = 15000 also scored very well on the validation set, but performed poorly in actual use; its generalization was weak.

    For the BP network, the matrix-form update equations are as follows.
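    These use the row-vector convention of the code below (one sample per row, bias as an appended unit, and $f'$ evaluated at the stored activations, e.g. $f'(a) = a \odot (1 - a)$ for the sigmoid).

    Forward pass through layers $l = 0, \dots, L-1$:

$$a^{(l+1)} = f\big(a^{(l)} W^{(l)}\big)$$

    Error at the output layer for target $y$:

$$\delta^{(L)} = \big(y - a^{(L)}\big) \odot f'\big(a^{(L)}\big)$$

    Backpropagated error for the hidden layers:

$$\delta^{(l)} = \big(\delta^{(l+1)} {W^{(l)}}^{\mathsf{T}}\big) \odot f'\big(a^{(l)}\big)$$

    Weight update with learning rate $\eta$:

$$W^{(l)} \leftarrow W^{(l)} + \eta \, {a^{(l)}}^{\mathsf{T}} \delta^{(l+1)}$$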

    Working in matrix form greatly reduces the amount of code.

 The code is attached below.

 Training

#!/usr/bin/python
#coding:utf-8
import pickle
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from os import listdir
import cv2
Dir_data_test = 'E:/Python/Num_check/DataTest/'
Dir_data = 'E:/Python/Num_check/DataSet/'
Dir_test = 'E:/Python/Num_check/test/'
Dir_model = 'E:/Python/Num_check/'
# activation functions and their derivatives; note that the derivative
# functions take the *activation output* a = f(z), because the training
# loop below stores activations rather than pre-activations
def tanh(x):
    return np.tanh(x)

def tanh_deriv(a):
    # tanh'(z) = 1 - tanh(z)^2 = 1 - a^2
    return 1.0 - a**2

def logistic(x):
    return 1/(1 + np.exp(-x))

def logistic_derivative(a):
    # sigmoid'(z) = sigmoid(z)*(1 - sigmoid(z)) = a*(1 - a)
    return a*(1 - a)

# NeuralNetwork: a simple fully connected BP network
class NeuralNetwork:
    # layers is a list, e.g. [10, 10, 3]: 10 neurons in the first layer,
    # 10 in the second, and 3 in the third
    def __init__(self, layers, activation='tanh'):
        
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tanh_deriv

        self.weights = []
        # start from 1, i.e. initialize the weights on both sides of each
        # hidden layer (the +1 entries carry the bias units)
        for i in range(1, len(layers) - 1):
            # weights from the previous layer into this hidden layer
            self.weights.append((2*np.random.random((layers[i - 1] + 1, layers[i] + 1))-1)*0.25)
            # weights from this hidden layer to the next layer
            self.weights.append((2*np.random.random((layers[i] + 1, layers[i + 1]))-1)*0.25)

    # Training. X: one sample per row; y: the target for each sample;
    # learning_rate: step size; epochs: maximum number of stochastic updates
    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)  # make sure X is at least 2-D
        temp = np.ones([X.shape[0], X.shape[1]+1])
        temp[:, 0:-1] = X  # adding the bias unit to the input layer
        X = temp
        y = np.array(y)  # convert a list into an array
        for k in range(epochs):
            # pick one random sample and update the network on it
            i = np.random.randint(X.shape[0])
            a = [X[i]]
            # forward pass through all the layers
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]

            # backward pass: propagate the error and update the weights
            for l in range(len(a) - 2, 0, -1):  # begin at the second-to-last layer
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_deriv(a[l]))
            deltas.reverse()
            for j in range(len(self.weights)):
                layer = np.atleast_2d(a[j])
                delta = np.atleast_2d(deltas[j])
                self.weights[j] += learning_rate * layer.T.dot(delta)

    # prediction: forward pass for a single sample (bias appended)
    def predict(self, x):
        x = np.array(x)
        temp = np.ones(x.shape[0]+1)
        temp[0:-1] = x
        a = temp
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a

        
if __name__ == '__main__':    
    FileList = listdir(Dir_data)           # one subdirectory per digit class
    TestList = listdir(Dir_test)
    DataTestList = listdir(Dir_data_test)
    pkl_file = 'E:/Python/Num_check/pkl_model.pkl'
    pkl_file2 = 'E:/Python/Num_check/pkl_model2.pkl'
    m = len(FileList)
    Test_length = len(DataTestList)
    digits_data = []
    digits_label = []
    digits_test_data = []
    digits_test_label = []
    print("训练集.....")
    #####------训练集------#######
    for i in range(m):
        print(FileList[i])
        PicList = listdir(Dir_data+FileList[i])
        for j in range(len(PicList)):
            image = cv2.imread(Dir_data+FileList[i]+'/'+PicList[j])
            # OpenCV loads images as BGR
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            image = cv2.resize(image, (32,32), interpolation=cv2.INTER_CUBIC)
            data = image.reshape(1,1024)
            # the first character of the class directory name is the digit
            label = FileList[i][0]
            digits_data.extend(data)
            digits_label.extend(label)
       
    digits_data = np.array(digits_data,dtype = float)
    digits_label = np.array(digits_label,dtype = int)
  
    x = digits_data
    y = digits_label
    
    # scale pixel values to [0, 1]
    x -= x.min()
    x /= x.max()
    
    print("验证集.....")
    #####------验证集-----#######
    for i in range(Test_length):
        print(DataTestList[i])
        PicList = listdir(Dir_data_test+DataTestList[i])
        for j in range(len(PicList)):
            image = cv2.imread(Dir_data_test+DataTestList[i]+'/'+PicList[j])
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            image = cv2.resize(image, (32,32), interpolation=cv2.INTER_CUBIC)
            data = image.reshape(1,1024)
            label = DataTestList[i][0]
            digits_test_data.extend(data)
            digits_test_label.extend(label)
            
    digits_test_data = np.array(digits_test_data,dtype = float)
    digits_test_label = np.array(digits_test_label,dtype = int)
    
    X_test = digits_test_data
    y_test = digits_test_label
    
    X_test -= X_test.min()
    X_test /= X_test.max()
    
    nn = NeuralNetwork([1024, 2048, 10], 'logistic')
    Labels_train = LabelBinarizer().fit_transform(digits_label)
    print('start fitting.....')
    nn.fit(x,Labels_train)
    predictions = []
    
    for i in range(len(X_test)):
        o = nn.predict(X_test[i])
        predictions.append(np.argmax(o))
    print(confusion_matrix(y_test, predictions))
    print(classification_report(y_test, predictions))
    
    # save model
    with open(pkl_file,'wb') as file:
        print("Saving model...")
        pickle.dump(nn,file)
        print("Done!")

  Testing

#!/usr/bin/python
#coding:utf-8
import pickle
import numpy as np
from os import listdir
import cv2
# the network class and activation functions must be importable so that
# pickle can reconstruct the saved model
from bpnn_num_chk import logistic, NeuralNetwork, logistic_derivative, tanh_deriv, tanh

Dir_model = 'E:/Python/Num_check/pkl_model.pkl'
Dir_test = 'E:/Python/Num_check/test/'
if __name__ == '__main__': 
    TestList = listdir(Dir_test)
    with open(Dir_model,'rb') as file:
        print("Loading model...")
        nn = pickle.load(file)
        print("Done!")
        cv2.namedWindow("image",cv2.WINDOW_NORMAL)
    for i in range(len(TestList)):
        test_img = cv2.imread(Dir_test+str(i)+'.jpg')
        print(test_img.shape)
        # convert to grayscale only if the image still has 3 (BGR) channels
        if len(test_img.shape) == 3:
            test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
        test_img = cv2.resize(test_img, (32,32), interpolation=cv2.INTER_CUBIC)

        cv2.imshow("image", test_img)
        cv2.waitKey(100)
        test_data = np.array(test_img.reshape(1024), dtype=float)
        # same [0, 1] scaling as in training
        test_data -= test_data.min()
        test_data /= test_data.max()
        result = nn.predict(test_data)
        print(np.argmax(result))

  High-pass filter

###########################################################
# 
# Name:   HPF_test
# Author: Yfj 401506102@qq.com 
# Time:   2018 09 29
# Version: beta
# function: HPF
#
# A high-pass filter brings out image contours: regions of high brightness
# (the high-frequency parts) become brighter still. It boosts brightness
# according to the differences between neighboring pixels.
# First convolve the image with a kernel,
# then compare against a Gaussian-blur-based high-pass.
#
###########################################################
#!/usr/bin/python 
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from scipy import ndimage

kernel_3x3 = np.array([[-1, -1, -1],
                      [-1,  8, -1],
                      [-1, -1, -1]])
                      
kernel_5x5 = np.array([[-1, -1, -1, -1, -1],
                      [-1,  1,  2,  1, -1],
                      [-1,  2,  4,  2, -1],
                      [-1,  1,  2,  1, -1],
                      [-1, -1, -1, -1, -1]])
                      
img = cv2.imread('E:/Python/HPF/HPF_testpic.png',0)
k3 = ndimage.convolve(img,kernel_3x3)
k5 = ndimage.convolve(img,kernel_5x5)
blurred = cv2.GaussianBlur(img,(11,11),0)
g_hpf = img - blurred  # note: uint8 subtraction wraps around negative values
cv2.namedWindow("3x3",cv2.WINDOW_NORMAL)
cv2.namedWindow("5x5",cv2.WINDOW_NORMAL)
cv2.namedWindow("g_hpf",cv2.WINDOW_NORMAL)
cv2.imshow("3x3",k3)
cv2.imshow("5x5",k5)
cv2.imshow("g_hpf",g_hpf)
cv2.waitKey(0)
cv2.destroyAllWindows()

  Low-pass filter

###########################################################
# 
# Name:   LPF_test
# Author: Yfj 401506102@qq.com 
# Time:   2018 09 29
# Version: beta
# function: LPF
#
# A low-pass filter smooths a pixel's brightness when its difference from
# the surrounding pixels is below a threshold.
# Here: blur with a median filter, find edges with a Laplacian, then darken
# those edges in the original color image.
#
###########################################################
#!/usr/bin/python 
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import os

def strokeEdges(src, dst, blurKsize=7, edgeKsize=5):
    if blurKsize >= 3:
        blurredSrc = cv2.medianBlur(src, blurKsize)
        graySrc = cv2.cvtColor(blurredSrc, cv2.COLOR_BGR2GRAY)
    else:
        graySrc = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    # edge strength via the Laplacian, written back into graySrc
    cv2.Laplacian(graySrc, cv2.CV_8U, graySrc, ksize=edgeKsize)
    # invert and rescale so strong edges become small multipliers
    normalizedInverseAlpha = (1.0/255)*(255-graySrc)
    channels = cv2.split(src)
    for channel in channels:
        channel[:] = channel*normalizedInverseAlpha
    cv2.merge(channels, dst)


if __name__ == '__main__':
    img = cv2.imread('E:/Python/LPF/HPF_testpic.png')    
   
    # scratch buffer for the output, same shape and dtype as the input
    dstImage = np.empty_like(img)
    strokeEdges(img, dstImage)
    cv2.namedWindow("Ori",cv2.WINDOW_NORMAL)
    cv2.namedWindow("Dst",cv2.WINDOW_NORMAL)
   
    cv2.imshow("Ori",img)
    cv2.imshow("Dst",dstImage)
    
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    

  SVM
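This is a from-scratch SVM that trains by solving the dual problem with cvxopt. The dual being solved is the standard soft-margin form

$$\max_{\alpha} \sum_i \alpha_i - \frac{1}{2}\sum_{i,j} \alpha_i \alpha_j y_i y_j K(x_i, x_j) \quad \text{s.t.}\; \sum_i \alpha_i y_i = 0,\; 0 \le \alpha_i \le C,$$

mapped onto cvxopt's convention $\min_{\alpha} \frac{1}{2}\alpha^{\mathsf{T}} P \alpha + q^{\mathsf{T}} \alpha$ subject to $G\alpha \le h$, $A\alpha = b$, with $P = (y y^{\mathsf{T}}) \odot K$, $q = -\mathbf{1}$, $A = y^{\mathsf{T}}$, $b = 0$, and $G$, $h$ encoding $0 \le \alpha_i \le C$ (just $\alpha_i \ge 0$ when C is None). The decision function computed by project() and predict() is $f(x) = \operatorname{sign}\big(\sum_i \alpha_i y_i K(x_i, x) + b\big)$.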

# -*- coding: utf-8 -*-
import numpy as np
from numpy import linalg
import cvxopt
import cvxopt.solvers

def linear_kernel(x1, x2):
    return np.dot(x1, x2)

def polynomial_kernel(x, y, p=3):
    return (1 + np.dot(x, y)) ** p

def gaussian_kernel(x, y, sigma=5.0):
    return np.exp(-linalg.norm(x-y)**2 / (2 * (sigma ** 2)))

class SVM(object):

    def __init__(self, kernel=linear_kernel, C=None):
        self.kernel = kernel
        self.C = C
        if self.C is not None: self.C = float(self.C)

    def fit(self, X, y):
        n_samples, n_features = X.shape

        # Gram matrix
        K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                K[i,j] = self.kernel(X[i], X[j])

        P = cvxopt.matrix(np.outer(y,y) * K)
        q = cvxopt.matrix(np.ones(n_samples) * -1)
        A = cvxopt.matrix(y, (1,n_samples))
        b = cvxopt.matrix(0.0)

        if self.C is None:
            G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
            h = cvxopt.matrix(np.zeros(n_samples))
        else:
            tmp1 = np.diag(np.ones(n_samples) * -1)
            tmp2 = np.identity(n_samples)
            G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
            tmp1 = np.zeros(n_samples)
            tmp2 = np.ones(n_samples) * self.C
            h = cvxopt.matrix(np.hstack((tmp1, tmp2)))

        # solve QP problem
        solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        # Lagrange multipliers
        '''
        flatten and ravel both turn an array into a 1-D vector.
        flatten always returns a copy, while ravel copies only when
        necessary (so it is much faster, especially on large arrays).
        '''
        a = np.ravel(solution['x'])
        # Support vectors have non zero lagrange multipliers
        '''
        multipliers with a > 1e-5 are treated as nonzero here
        '''
        sv = a > 1e-5     # return a list with bool values
        ind = np.arange(len(a))[sv]  # sv's index
        self.a = a[sv]
        self.sv = X[sv]  # sv's data
        self.sv_y = y[sv]  # sv's labels
        print("%d support vectors out of %d points" % (len(self.a), n_samples))

        # Intercept
        '''
        this averages the intercept b over all the support vectors
        '''
        self.b = 0
        for n in range(len(self.a)):
            self.b += self.sv_y[n]
            self.b -= np.sum(self.a * self.sv_y * K[ind[n],sv])
        self.b /= len(self.a)

        # Weight vector
        if self.kernel == linear_kernel:
            self.w = np.zeros(n_features)
            for n in range(len(self.a)):
                # linear_kernel works in the original input space, so w can
                # be computed directly without mapping to a feature space
                self.w += self.a[n] * self.sv_y[n] * self.sv[n]
        else:
            self.w = None

    def project(self, X):
        # w exists, i.e. the kernel is linear_kernel: compute directly
        if self.w is not None:
            return np.dot(X, self.w) + self.b
        # w is None --> nonlinear kernel; w is never formed explicitly
        # (it lives in feature space), so predict from the kernel expansion
        # over the support vectors instead
        else:
            y_predict = np.zeros(len(X))
            for i in range(len(X)):
                s = 0
                for a, sv_y, sv in zip(self.a, self.sv_y, self.sv):
                    s += a * sv_y * self.kernel(X[i], sv)
                y_predict[i] = s
            return y_predict + self.b

    def predict(self, X):
        return np.sign(self.project(X))

if __name__ == "__main__":
    import pylab as pl

    def gen_lin_separable_data():
        # generate training data in the 2-d case
        mean1 = np.array([0, 2])
        mean2 = np.array([2, 0])
        cov = np.array([[0.8, 0.6], [0.6, 0.8]])
        X1 = np.random.multivariate_normal(mean1, cov, 100)
        y1 = np.ones(len(X1))
        X2 = np.random.multivariate_normal(mean2, cov, 100)
        y2 = np.ones(len(X2)) * -1
        return X1, y1, X2, y2

    def gen_non_lin_separable_data():
        mean1 = [-1, 2]
        mean2 = [1, -1]
        mean3 = [4, -4]
        mean4 = [-4, 4]
        cov = [[1.0,0.8], [0.8, 1.0]]
        X1 = np.random.multivariate_normal(mean1, cov, 50)
        X1 = np.vstack((X1, np.random.multivariate_normal(mean3, cov, 50)))
        y1 = np.ones(len(X1))
        X2 = np.random.multivariate_normal(mean2, cov, 50)
        X2 = np.vstack((X2, np.random.multivariate_normal(mean4, cov, 50)))
        y2 = np.ones(len(X2)) * -1
        return X1, y1, X2, y2

    def gen_lin_separable_overlap_data():
        # generate training data in the 2-d case
        mean1 = np.array([0, 2])
        mean2 = np.array([2, 0])
        cov = np.array([[1.5, 1.0], [1.0, 1.5]])
        X1 = np.random.multivariate_normal(mean1, cov, 100)
        y1 = np.ones(len(X1))
        X2 = np.random.multivariate_normal(mean2, cov, 100)
        y2 = np.ones(len(X2)) * -1
        return X1, y1, X2, y2

    def split_train(X1, y1, X2, y2):
        X1_train = X1[:90]
        y1_train = y1[:90]
        X2_train = X2[:90]
        y2_train = y2[:90]
        X_train = np.vstack((X1_train, X2_train))
        y_train = np.hstack((y1_train, y2_train))
        return X_train, y_train

    def split_test(X1, y1, X2, y2):
        X1_test = X1[90:]
        y1_test = y1[90:]
        X2_test = X2[90:]
        y2_test = y2[90:]
        X_test = np.vstack((X1_test, X2_test))
        y_test = np.hstack((y1_test, y2_test))
        return X_test, y_test

    # only used for plotting in the linear case, i.e. when w exists
    def plot_margin(X1_train, X2_train, clf):
        def f(x, w, b, c=0):
            # given x, return y such that [x,y] in on the line
            # w.x + b = c
            return (-w[0] * x - b + c) / w[1]

        pl.plot(X1_train[:,0], X1_train[:,1], "ro")
        pl.plot(X2_train[:,0], X2_train[:,1], "bo")
        pl.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c="g")

        # w.x + b = 0
        a0 = -4; a1 = f(a0, clf.w, clf.b)
        b0 = 4; b1 = f(b0, clf.w, clf.b)
        pl.plot([a0,b0], [a1,b1], "k")

        # w.x + b = 1
        a0 = -4; a1 = f(a0, clf.w, clf.b, 1)
        b0 = 4; b1 = f(b0, clf.w, clf.b, 1)
        pl.plot([a0,b0], [a1,b1], "k--")

        # w.x + b = -1
        a0 = -4; a1 = f(a0, clf.w, clf.b, -1)
        b0 = 4; b1 = f(b0, clf.w, clf.b, -1)
        pl.plot([a0,b0], [a1,b1], "k--")

        pl.axis("tight")
        pl.show()

    def plot_contour(X1_train, X2_train, clf):
        # plot the training samples
        pl.plot(X1_train[:,0], X1_train[:,1], "ro")
        pl.plot(X2_train[:,0], X2_train[:,1], "bo")
        # plot the support vectors
        pl.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c="g")
        X1, X2 = np.meshgrid(np.linspace(-6,6,50), np.linspace(-6,6,50))
        X = np.array([[x1, x2] for x1, x2 in zip(np.ravel(X1), np.ravel(X2))])
        Z = clf.project(X).reshape(X1.shape)
        # pl.contour draws level curves of the decision function
        pl.contour(X1, X2, Z, [0.0], colors='k', linewidths=1, origin='lower')
        pl.contour(X1, X2, Z + 1, [0.0], colors='grey', linewidths=1, origin='lower')
        pl.contour(X1, X2, Z - 1, [0.0], colors='grey', linewidths=1, origin='lower')

        pl.axis("tight")
        pl.show()

    def test_linear():
        X1, y1, X2, y2 = gen_lin_separable_data()
        X_train, y_train = split_train(X1, y1, X2, y2)
        X_test, y_test = split_test(X1, y1, X2, y2)

        clf = SVM()
        clf.fit(X_train, y_train)

        y_predict = clf.predict(X_test)
        correct = np.sum(y_predict == y_test)
        print("%d out of %d predictions correct" % (correct, len(y_predict)))

        plot_margin(X_train[y_train==1], X_train[y_train==-1], clf)

    def test_non_linear():
        X1, y1, X2, y2 = gen_non_lin_separable_data()
        X_train, y_train = split_train(X1, y1, X2, y2)
        X_test, y_test = split_test(X1, y1, X2, y2)

        clf = SVM(gaussian_kernel)
        clf.fit(X_train, y_train)

        y_predict = clf.predict(X_test)
        correct = np.sum(y_predict == y_test)
        print("%d out of %d predictions correct" % (correct, len(y_predict)))

        plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)

    def test_soft():
        X1, y1, X2, y2 = gen_lin_separable_overlap_data()
        X_train, y_train = split_train(X1, y1, X2, y2)
        X_test, y_test = split_test(X1, y1, X2, y2)

        clf = SVM(C=0.1)
        clf.fit(X_train, y_train)

        y_predict = clf.predict(X_test)
        correct = np.sum(y_predict == y_test)
        print("%d out of %d predictions correct" % (correct, len(y_predict)))

        plot_contour(X_train[y_train==1], X_train[y_train==-1], clf)

    # test_soft()
    # test_linear()
    test_non_linear()

 Histogram equalization (MATLAB)

img = imread('6.jpg');
% equalize each RGB channel separately, then recombine
R = img(:,:,1);
G = img(:,:,2);
B = img(:,:,3);
M = histeq(R);
N = histeq(G);
L = histeq(B);
Dis_img = cat(3,M,N,L);
figure(1);
subplot(2,2,1);
imshow(img);
title('Original image');
subplot(2,2,2);
imshow(Dis_img);
title('Equalized image');
imsave;  % opens an interactive dialog to save the displayed image
Gray_img = rgb2gray(img);
Dis = rgb2gray(Dis_img);
subplot(2,2,3);
imhist(Gray_img);
title('Grayscale histogram of the original');
subplot(2,2,4);
imhist(Dis);
title('Grayscale histogram after equalization');
