c++调用pb文件并生成可执行文件exe

直接上python源码!

  • 一、猫狗大战模型训练与导出模型

  1. 导入数据,并生成npy数据格式 
# noinspection PyUnresolvedReferences
# 导入相关工具包
import cv2 as cv
import numpy as np
import glob
import os
from tqdm import tqdm


def loadData(oriPath, label):
    """Load every .jpg under *oriPath* as a 224x224 RGB array.

    Parameters:
        oriPath: directory containing the .jpg images.
        label: numeric class label assigned to every image (0 = cat, 1 = dog).

    Returns:
        (x, y): x is a float ndarray of shape (N, 224, 224, 3) in RGB order,
        y is a 1-D float ndarray of length N filled with *label*.

    Raises:
        ValueError: if an image file cannot be decoded.
    """
    # glob collects all .jpg paths in the directory (non-recursive).
    allImagePath = glob.glob(os.path.join(oriPath, '*.jpg'))

    # np.empty allocates without initialising; every slot is overwritten below.
    x = np.empty([len(allImagePath), 224, 224, 3])
    # tqdm wraps the index range with a progress bar.
    for idx in tqdm(range(len(allImagePath))):
        imagePath = allImagePath[idx]
        image = cv.imread(imagePath)
        if image is None:
            # cv.imread returns None on unreadable files; fail with a clear
            # message instead of crashing inside cvtColor.
            raise ValueError('cannot read image: %s' % imagePath)
        # OpenCV loads BGR; convert to RGB so training and inference agree.
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        image = cv.resize(image, (224, 224))

        x[idx, :] = image

    # One label per sample. np.full replaces the original
    # np.linspace(label, label, n) trick (same float64 result, clearer),
    # and the dead "y = np.empty([0])" pre-assignment is removed.
    y = np.full(x.shape[0], float(label))

    return x, y


# Load each split; label 0 = cat, 1 = dog.
trainCatx, trainCaty = loadData('data/train/cat', 0)
trainDogx, trainDogy = loadData('data/train/dog', 1)
evalCatx, evalCaty = loadData('data/test/cat', 0)
evalDogx, evalDogy = loadData('data/test/dog', 1)

# Stack cats before dogs along the sample axis (axis 0).
trainData = np.concatenate((trainCatx, trainDogx), axis=0)
trainLabel = np.concatenate((trainCaty, trainDogy), axis=0)
evalData = np.concatenate((evalCatx, evalDogx), axis=0)
evalLabel = np.concatenate((evalCaty, evalDogy), axis=0)

# Spot-check: write one training sample and one eval sample back to disk.
# Arrays are RGB floats, so cast to uint8 and flip back to BGR for imwrite.
for outName, sample in (('test1.jpg', trainData[233]),
                        ('test2.jpg', evalData[588])):
    cv.imwrite(outName, cv.cvtColor(sample.astype(np.uint8), cv.COLOR_RGB2BGR))

print(trainLabel[233])
print(evalLabel[588])

# Persist the raw (un-normalized) arrays for the next stage.
np.save('data/trainData.npy', trainData)
np.save('data/trainLabel.npy', trainLabel)
np.save('data/evalData.npy', evalData)
np.save('data/evalLabel.npy', evalLabel)

2. 对生成的npy数据进行归一化处理

import numpy as np
import tensorflow.keras as keras

trainData = np.load('data/trainData.npy')
trainLabel = np.load('data/trainLabel.npy')
evalData = np.load('data/evalData.npy')
evalLabel = np.load('data/evalLabel.npy')

print(trainData[5])

# Map pixel values from [0, 255] to roughly [-1, 1].
trainData = (trainData - 128.0) / 128.0
evalData = (evalData - 128.0) / 128.0

print(trainData[0])

# Scalar labels -> one-hot vectors over the 2 classes (cat / dog).
trainLabelOnehot = keras.utils.to_categorical(trainLabel, 2)
evalLabelOnehot = keras.utils.to_categorical(evalLabel, 2)

print(trainLabel[233])
print('...')
print(trainLabelOnehot[233])
print('>>.')
print(evalLabel[158])
print(evalLabelOnehot[158])

# Shuffle data and labels with the same random permutation so pairs stay aligned.
shuffleIdx = np.random.permutation(trainData.shape[0])
trainData = trainData[shuffleIdx]
trainLabelOnehot = trainLabelOnehot[shuffleIdx]

shuffleIdx = np.random.permutation(evalData.shape[0])
evalData = evalData[shuffleIdx]
evalLabelOnehot = evalLabelOnehot[shuffleIdx]

np.save('data/trainDataNormalized.npy', trainData)
np.save('data/trainLabelOnehot.npy', trainLabelOnehot)
np.save('data/evalDataNormalized.npy', evalData)
np.save('data/evalLabelOnehot.npy', evalLabelOnehot)

3.迁移模型,并训练。同时生成h5权重文件和动态pb模型文件。

import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import numpy as np
import datetime
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
import netron
import os

# Transfer learning: start from ImageNet-pretrained MobileNet.
mobilenet = tf.keras.applications.MobileNet(weights='imagenet', input_shape=(224, 224, 3))

# Take the last convolutional activation and attach a small 2-class head.
x = mobilenet.get_layer('conv_pw_13_relu').output
x = layers.Flatten()(x)
x = layers.Dense(100, activation='relu')(x)
predictLayer = layers.Dense(2, activation='softmax')(x)

newModel = keras.Model(inputs=mobilenet.input, outputs=predictLayer)

# Freeze everything, then unfreeze only the two new Dense layers.
for layer in newModel.layers:
    layer.trainable = False

newModel.layers[-2].trainable = True
newModel.layers[-1].trainable = True

for layer in newModel.layers:
    print(layer.trainable)

adam = keras.optimizers.Adam(0.0001)  # learning rate

newModel.compile(loss='categorical_crossentropy',
                 optimizer=adam,
                 metrics=['categorical_accuracy'])  # cross-entropy loss, accuracy metric

newModel.summary()

tensorBoard = keras.callbacks.TensorBoard(log_dir='kerasLog', write_images=1, histogram_freq=1)

trainData = np.load('data/trainDataNormalized.npy')
trainLabel = np.load('data/trainLabelOnehot.npy')

newModel.fit(trainData, trainLabel, epochs=10, batch_size=64, validation_split=0.2, callbacks=[tensorBoard], verbose=1)

# Save the full Keras model (architecture + weights).
newModel.save("model.h5")


path = "./serving_model/commodity/"
version = 1  # fixed: was misspelled 'verion'


# Trace the model to a concrete function so it can be frozen into a graph.
full_model = tf.function(lambda Input: newModel(Input))
full_model = full_model.get_concrete_function(tf.TensorSpec(newModel.inputs[0].shape, newModel.inputs[0].dtype))

# Get frozen ConcreteFunction (variables folded into graph constants).
frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()

# Fixed: the original bound this list to 'layers', shadowing the imported
# tensorflow.keras.layers module for the rest of the script.
opNames = [op.name for op in frozen_func.graph.get_operations()]


# Fixed: the original joined tf.compat.as_bytes() results, producing a bytes
# path that "%s" later rendered as the literal string "b'...'", creating a
# directory actually named b'...'. Join plain strings instead.
export_path = os.path.join(path, str(version))

tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir=export_path,
                  name="model.pb",
                  as_text=False)

4.对模型进行预测

import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt

model = keras.models.load_model('model.h5')

evalData = np.load('data/evalDataNormalized.npy')
evalLabel = np.load('data/evalLabelOnehot.npy')

# Sanity-check the restored model on the held-out set.
result = model.evaluate(evalData, evalLabel)


def pregarePredict(path):
    """Read the image at *path* and preprocess it exactly like the training data.

    Returns a (1, 224, 224, 3) float array: RGB order, resized to 224x224,
    scaled to roughly [-1, 1]. (Function name is a typo for 'prepare' but is
    kept unchanged so existing callers are not broken.)
    """
    image = cv.imread(path)
    image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    image = cv.resize(image, (224, 224))
    image = (image - 128.0) / 128.0

    # Add the batch dimension expected by model.predict.
    image = np.expand_dims(image, axis=0)

    return image


def ShowImage(path):
    """Predict the class of the image at *path* and display it with its label."""
    img = cv.imread(path)
    test = pregarePredict(path)
    # Fixed: predict once and reuse -- the original ran model.predict twice
    # (once into an unused variable, once again inside plt.title).
    y = np.argmax(model.predict(test))
    Dict = ['猫', '狗']
    plt.imshow(img[:, :, ::-1], cmap=None)  # BGR -> RGB for display
    # Configure matplotlib so Chinese titles render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']  # font that has CJK glyphs
    plt.rcParams['axes.unicode_minus'] = False  # render minus sign correctly
    plt.title(Dict[y])
    plt.axis('off')
    plt.show()


ShowImage('4.jpg')

5.导出静态pb模型文件

import tensorflow as tf
import os
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
import netron


# Export method 2: freeze an h5-weights model into a single .pb file.
def h5_to_pb(h5_save_path, verion=1, path="./serving_model/commodity/"):
    """Freeze a Keras model into one TensorFlow GraphDef (.pb) file.

    Parameters:
        h5_save_path: path of the .h5 weights file to load.
        verion: version subdirectory name (sic -- the misspelled parameter
            name is kept for backward compatibility with existing callers).
        path: export root directory; the graph is written to path/verion/model.pb.
    """
    # NOTE(review): SSD300 is not imported anywhere in this file, so this call
    # raises NameError as written -- the SSD300 model builder must be imported.
    model = SSD300((300, 300, 3), num_classes=11)
    model.load_weights(h5_save_path)
    model.summary()
    # Trace the model to a concrete function so it can be frozen.
    full_model = tf.function(lambda Input: model(Input))
    full_model = full_model.get_concrete_function(tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))

    # Get frozen ConcreteFunction (variables folded into graph constants).
    frozen_func = convert_variables_to_constants_v2(full_model)
    frozen_func.graph.as_graph_def()

    # Fixed: was bound to 'layers', a name this project elsewhere uses for the
    # tensorflow.keras.layers module.
    op_names = [op.name for op in frozen_func.graph.get_operations()]
    print("-" * 50)
    print("Frozen model layers: ")
    for layer in op_names:
        print(layer)

    print("-" * 50)
    print("Frozen model inputs: ")
    print(frozen_func.inputs)
    print("Frozen model outputs: ")
    print(frozen_func.outputs)

    # Fixed: the original joined tf.compat.as_bytes() results, so the later
    # "%s" % export_path rendered as "b'...'" and tf.io.write_graph created a
    # directory literally named b'./serving_model/commodity/1'. Join strings.
    export_path = os.path.join(path, str(verion))

    tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                      logdir=export_path,
                      name="model.pb",
                      as_text=False)


if __name__ == '__main__':
    h5_to_pb("model_data/ssd_weights.h5", verion=1, path="./serving_model/commodity/")
    # With the string-path fix above, the exported file lives at a normal path
    # (the original's "b'...'" literal reflected the bytes-path bug).
    modelPath = "./serving_model/commodity/1/model.pb"
    netron.start(modelPath)
  • 二、c++调用静态pb模型并进行预测

 1.打开Visual Studio 2019,配置OpenCV属性。

如果想在装有VS环境和python环境的电脑上生成可执行文件并在该电脑上运行,可直接下载c++版opencv程序包,配置环境变量,将opencv的lib目录和bin目录(包含dll等可执行文件)添加到本机环境变量中。

环境变量的加载:

(1)首先打开控制面板,找到查看高级系统设置选项

(2) 选择高级—>环境变量

(3) 选择用户变量中的path,编辑,加入变量路径。

配置VS2019

1、 配置环境的主要目的是告诉计算机OpenCV放在哪儿,以及告诉IDE应该去哪里寻找头文件和库文件。首先我们启动vs,创建一个控制台应用。

2、创建完成后打开项目,修改上方的“Debug”模式,将其修改为“x64”模式,如图:

3、依次单击界面上方的【视图】->【其他窗口】->【属性管理器】,在右侧会出现“属性管理器”界面,如图所示:

 

4、右键“Debug|x64”选择【添加新项目属性表】,添加后双击将其打开,如图所示:

 

5、属性页的“VC++目录”的“包含目录”和“库目录”,以及“链接器”中的“附加依赖项”,便是我们需要修改的地方。在“包含目录”里添加如下两个文件路径:
C:\Program Files\OpenCV\build\include
C:\Program Files\OpenCV\build\include\opencv2
配置库目录:
C:\Program Files\OpenCV\build\x64\vc15\lib
配置链接器:
在“链接器”的“输入”项中的“附加依赖项”添加opencv_world440d.lib,如图:

 

此时项目目录中会生成一个属性配置文件,可将其保存下来,下次创建opencv目录直接将该配置导入即可,无需再次配置

 

验证上述配置,同时调用生成的模型进行预测:

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
using namespace cv;
using namespace std;
using namespace cv::dnn;

// Class labels in the same order as the training one-hot encoding (0 = cat, 1 = dog).
static string class_name[] = { "猫", "狗"};

int main()
{
    // Fixed: std::string instead of a fixed char[50] -- "cin >>" into a raw
    // char array overflows the buffer on long file names.
    string img;
    cout << "请输入图片名称" << endl;
    cin >> img;
    Mat frame = imread(img);
    if (frame.empty())
    {
        // imread returns an empty Mat on failure; bail out instead of
        // crashing later inside convertTo/blobFromImage.
        cout << "无法读取图片" << endl;
        return 1;
    }
    cout <<"图片高" << frame.rows << "图片宽" << frame.cols << endl;
    imshow("1", frame);
    string path = "model.pb";
    Net net = readNetFromTensorflow(path);
    printf("模型加载成功\n");

    // Convert to float so the (x - 128) / 128 normalization matches training.
    // CV_32FC3 replaces the misleading CV_32FC1 (convertTo keeps the input's
    // channel count and uses only the depth, so behavior is identical).
    Mat frame_32F;
    frame.convertTo(frame_32F, CV_32FC3);

    // Fixed: swapRB=true. The model was trained on RGB input (the Python
    // pipeline converts BGR->RGB) while OpenCV's imread yields BGR; the
    // original omitted the flag and fed channel-swapped data to the network.
    Mat blob = blobFromImage((frame_32F - 128.0) / 128.0,
        1.0,
        Size(224, 224),
        Scalar(0, 0, 0),
        true,    // swapRB: BGR -> RGB
        false);  // crop: plain resize, same as training
    net.setInput(blob);
    Mat out = net.forward();
    // Index of the largest softmax score = predicted class.
    Point maxclass;
    minMaxLoc(out, NULL, NULL, NULL, &maxclass);
    cout << "预测结果为:" << class_name[maxclass.x] << endl;
    waitKey(0);
    return 0;
}

 将测试图片放到image文件夹下,测试结果

 

2.如果想在本机训练,生成可执行文件,到其他电脑仍能运行,则需要将opencv程序包直接复制到工程目录中,并在vs2019中进行配置。 

TensorFlow训练好catsvsdogs模型,导出pb文件
将pb文件加载到c++项目中,导入必须的opencv包和dll文件
编写c++项目,配置c++项目
release配置方法,导出exe文件。
可直接执行的文件exe,opencv的dll文件,pb模型文件放到一起即可直接运行。

 

至此,可将该文件夹直接拷贝到其他电脑上,运行可执行文件进行预测。

配置阶段需要的步骤:

【配置阶段】

步骤1:首先在vs2012环境下新建一个工程,工程命名为test。

步骤2:<Opencv文件准备>

(1)将opencv2.4.13安装目录\build下的include文件夹拷贝到当前工程目录下,重命名为opencv_include;

(2)将opencv2.4.13安装目录\build\x86\vc11下的staticlib文件夹拷贝到当前工程目录下,重命名为opencv_lib;

步骤3:返回到vs开发环境,在左侧【解决方案资源管理器】下目录树中右键项目名称,选择【属性】,在“配置属性”->“C/C++”->“常规”->“附加包含目录”中添加opencv_include目录(即步骤2(1)中的opencv_include的路径)。

步骤4:在“链接器”->“输入”->“附加依赖项”里添加相应的lib文件,debug版添加带d的lib,release版添加不带d的lib。详细如下:(注:这里使用了相对路径,其中“.”表示当前目录。)

//win32控制台程序需要的lib
vfw32.lib
comctl32.lib

//debug版
//opencv用到的第三方lib
//opencv各模块的lib
.\opencv_lib\IlmImfd.lib
.\opencv_lib\libjasperd.lib
.\opencv_lib\libjpegd.lib
.\opencv_lib\libpngd.lib
.\opencv_lib\libtiffd.lib
.\opencv_lib\zlibd.lib
.\opencv_lib\opencv_ml2413d.lib
.\opencv_lib\opencv_calib3d2413d.lib
.\opencv_lib\opencv_contrib2413d.lib
.\opencv_lib\opencv_core2413d.lib
.\opencv_lib\opencv_features2d2413d.lib
.\opencv_lib\opencv_flann2413d.lib
.\opencv_lib\opencv_gpu2413d.lib
.\opencv_lib\opencv_highgui2413d.lib
.\opencv_lib\opencv_imgproc2413d.lib
.\opencv_lib\opencv_legacy2413d.lib
.\opencv_lib\opencv_objdetect2413d.lib
.\opencv_lib\opencv_ts2413d.lib
.\opencv_lib\opencv_video2413d.lib
.\opencv_lib\opencv_nonfree2413d.lib
.\opencv_lib\opencv_ocl2413d.lib
.\opencv_lib\opencv_photo2413d.lib
.\opencv_lib\opencv_stitching2413d.lib
.\opencv_lib\opencv_superres2413d.lib
.\opencv_lib\opencv_videostab2413d.lib

//release版
.\opencv_lib\IlmImf.lib
.\opencv_lib\libjasper.lib
.\opencv_lib\libjpeg.lib
.\opencv_lib\libpng.lib
.\opencv_lib\libtiff.lib
.\opencv_lib\zlib.lib
.\opencv_lib\opencv_objdetect2413.lib
.\opencv_lib\opencv_ts2413.lib
.\opencv_lib\opencv_video2413.lib
.\opencv_lib\opencv_nonfree2413.lib
.\opencv_lib\opencv_ocl2413.lib
.\opencv_lib\opencv_photo2413.lib
.\opencv_lib\opencv_stitching2413.lib
.\opencv_lib\opencv_superres2413.lib
.\opencv_lib\opencv_videostab2413.lib
.\opencv_lib\opencv_calib3d2413.lib
.\opencv_lib\opencv_contrib2413.lib
.\opencv_lib\opencv_core2413.lib
.\opencv_lib\opencv_features2d2413.lib
.\opencv_lib\opencv_flann2413.lib
.\opencv_lib\opencv_gpu2413.lib
.\opencv_lib\opencv_highgui2413.lib
.\opencv_lib\opencv_imgproc2413.lib
.\opencv_lib\opencv_legacy2413.lib
.\opencv_lib\opencv_ml2413.lib
注意:需要把下图中圆圈处的默认勾选去掉。

步骤5:在“C/C++”->“代码生成”->“运行时库”中,debug版选择MTd,release版选择MT。


至此,配置已经完成。

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值