Smart Car AI Electromagnetic Deployment Learning (Part 3)


Overview

A few days ago a junior classmate and I worked together to port the forward pass of a convolutional neural network from MATLAB to C.
At its core the workflow is the same under any framework: build the network, train it, and extract the parameters.
MATLAB's strength is that its UI lets beginners assemble a network quickly; the drawback is that my classmate still has not managed to fully automate extracting the weight matrices from MATLAB, and has had to copy them out by hand, one at a time (laughing through tears).

Python code

The script below parses an .h5 model file, extracts the weight matrices, names them by group, converts them into C arrays, and writes out a single .c file.
Previous posts
Smart Car AI Electromagnetic Deployment Learning (Part 1)
Smart Car AI Electromagnetic Deployment Learning (Part 2)
The relevant code is as follows:

import numpy as np
import h5py
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Input, Dense, Dropout, Activation, Flatten, ReLU
from keras.layers import DepthwiseConv2D, Conv2D, AveragePooling2D, MaxPooling2D, SeparableConv2D, add
from keras.layers import BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
from keras.utils.np_utils import to_categorical
# tf_utils is a course-specific helper module; only import it if you actually have it:
# from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict


def Kernel_transform(Kernel, f):
    # Write a 2-D weight matrix as a C initializer list: {{a, b, ...}, ...};
    f.write("{\n")
    Shape = np.shape(Kernel)
    for i in range(Shape[0]):
        f.write("{")
        f.write("%12lf" % (Kernel[i][0]) + " ")
        for j in range(1, Shape[1]):    # remaining columns, comma-separated
            f.write("," + "%12lf" % (Kernel[i][j]) + " ")
        if i != Shape[0] - 1:
            f.write("},\n")
        else:
            f.write("}")
    f.write("};\n\n")


def Bias_transform(Bias, f):
    # Write a 1-D bias vector as a C initializer list: {a, b, ...};
    f.write("{")
    Shape = np.shape(Bias)
    for i in range(Shape[0]):
        if i != 0:
            f.write("," + "%12lf" % (Bias[i]) + " ")
        else:
            f.write("%12lf" % (Bias[i]) + " ")
    f.write("};\n\n\n")


def Weights_transform(Hide_num, Kernel, Bias, f):
    # Emit "double W<n>[rows][cols] = {...};" and "double B<n>[len] = {...};".
    # For a Keras Dense layer the kernel shape is (input_dim, output_dim).
    K_Shape = np.shape(Kernel)
    B_Shape = np.shape(Bias)
    f.write("double W" + str(Hide_num))
    f.write("[" + str(K_Shape[0]) + "]")
    f.write("[" + str(K_Shape[1]) + "] = ")
    Kernel_transform(Kernel, f)
    f.write("double B" + str(Hide_num))
    f.write("[" + str(B_Shape[0]) + "] = ")
    Bias_transform(Bias, f)


def Shape_transform(dense_kernel, dense_bias, f):
    # Emit "unsigned int Shape[n][2]" holding each layer's (input_dim, output_dim)
    # so the C side knows every matrix's dimensions. dense_bias is accepted for
    # symmetry with the other writers but is not used here.
    dense_num = len(dense_kernel)
    f.write("unsigned int Shape")
    f.write("[" + str(dense_num) + "]")
    f.write("[2] = ")
    f.write("{\n")

    for i in range(dense_num):
        f.write("{")
        f.write("%4d" % (np.shape(dense_kernel[i])[0]) + " ")
        f.write("," + "%4d" % (np.shape(dense_kernel[i])[1]) + " ")
        if i != dense_num - 1:
            f.write("},\n")
        else:
            f.write("}")
    f.write("};\n\n")


# Extract the model weights and convert them into a C source file
def quantitative_model_weights(in_file_name, out_file_name):
    dense_kernel = []
    dense_bias = []
    with h5py.File(in_file_name, 'r') as f:
        for group in f.keys():
            if group == "model_weights":    # only walk the weights group
                group_read = f[group]
                for subgroup in group_read.keys():
                    dset_read = f[group + '/' + subgroup]
                    for net in dset_read.keys():
                        dset = f[group + '/' + subgroup + '/' + net]
                        if 'kernel:0' in dset:  # skip layers without weights
                            dense_kernel.append(dset['kernel:0'][:])
                            dense_bias.append(dset['bias:0'][:])
    with open(out_file_name, "w") as f:
        f.write("#include <stdio.h>\n\n")
        f.write("// exported model parameters\n\n")
        f.write("#define LAYERS_NUM %d\n\n" % len(dense_kernel))
        # per-layer output width, handy for sizing intermediate buffers in C
        for i in range(len(dense_kernel)):
            f.write("#define W%d_TEMP_SIZE %d\n" % (i + 1, np.shape(dense_kernel[i])[1]))
        f.write("\n\n")
        Shape_transform(dense_kernel, dense_bias, f)
        f.write("\n\n")
        for i in range(len(dense_kernel)):
            Weights_transform(i + 1, dense_kernel[i], dense_bias[i], f)
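
# Typical HDF5 layout written by Keras model.save() for a Sequential model
# (illustrative; actual group names depend on the layer names in your model):
#   model_weights/
#       dense_1/dense_1/kernel:0    shape (input_dim, output_dim)
#       dense_1/dense_1/bias:0      shape (output_dim,)
#       dense_2/dense_2/kernel:0
#       ...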


# Read a text file and convert it into training arrays
def readtxt_make_arr(in_file_name, begin_str):
    with open(in_file_name, "r") as f:
        begin_flag = False
        first_line_flag = True

        train_x_out = None          # x: one row per sample
        train_y_out = []            # y: one label per sample
        for line in f.readlines():  # iterate over the lines
            train_x_temp = []       # x values of the current line
            if line == '\n':
                break
            if line == begin_str + '\n':   # found the start marker
                begin_flag = True
                continue
            if begin_flag:
                line = line.strip('\n')  # drop the trailing newline
                str_temp = ''
                for i in line:      # split the line on commas by hand
                    if i == ',':
                        train_x_temp.append(int(str_temp))
                        str_temp = ''
                    else:
                        str_temp += i
                train_y_out.append(int(str_temp))   # last field is the label

                if first_line_flag:     # first sample: create the array
                    train_x_out = np.array(train_x_temp, ndmin=1)
                    train_x_out = np.reshape(train_x_out, (1, -1))

                    first_line_flag = False
                else:                   # append one more row
                    train_x_out = np.r_[train_x_out, np.reshape(np.array(train_x_temp, ndmin=1), (1, -1))]
        if not begin_flag:
            print("begin line not found")
    train_y_out = np.array(train_y_out)
    train_y_out = np.reshape(train_y_out, (-1, 1))
    return train_x_out, train_y_out
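
# Expected input layout (illustrative): lines before the marker are ignored; after
# a line equal to begin_str, each line is "x1,x2,...,xn,label"; a blank line stops parsing:
#   begin
#   100,100,0
#   -70,80,1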


# Randomly split an array into train and test sets
def split_train(data, test_ratio=0.2):
    shuffled_indices = np.random.permutation(len(data))
    test_set_size = int(len(data) * test_ratio)
    test_indices = shuffled_indices[:test_set_size]
    train_indices = shuffled_indices[test_set_size:]
    return data[train_indices], data[test_indices]


# train_x_org, train_y_org = readtxt_make_arr("usart.txt", "begin")
# train_x_y_org = np.c_[train_x_org, train_y_org]
# train_x_y, test_x_y = split_train(train_x_y_org, 0.2)
# train_x, train_y = np.split(train_x_y, [-1], axis=1)    # split off the last column as y
# test_x, test_y = np.split(test_x_y, [-1], axis=1)

# np.split(train_x_y, [-1], axis=1): axis=1 operates on columns
# np.r_ stacks arrays vertically (adds rows)
# np.c_ stacks arrays horizontally (adds columns)


##########  model   ##########
# model = Sequential()
# model.add(Dense(100, activation='relu', input_dim=2))
# model.add(Dense(50, activation='relu'))
# model.add(Dense(10, activation='relu'))
# model.add(Dense(2, activation='softmax'))
#
# model.compile(optimizer=keras.optimizers.SGD(lr=0.1), loss='mean_squared_error')

##########  train_data  ##########
# # x_train = np.array([[100,100],[50,10],[80,30],[1,1],
# #                     [-2,1],[-70,80],[-200,25],[-1, 10]])
# # y_train = np.array([0,0,0,0,1,1,1,1])
# train_x_org, train_y_org = readtxt_make_arr("usart.txt", "begin")
# x_train = train_x_org
# y_train = to_categorical(train_y_org.T)     # convert labels to one-hot (adds an extra dimension)
# y_train = np.reshape(y_train, (-1, 2))       # reshape to the softmax target format (num_samples, num_classes)
#
##########  fit  ##########
# model.fit(x_train,y_train,epochs=200)

##########  predict ##########
# x_test = np.array([90, 10])
# x_test = np.reshape(x_test,(1, -1))
# print(x_test)
# print(model.predict(x_test))    # batch input format: (1, -1)

##########  save    ##########
# model.save('softmax1.h5')

##########  load    ##########
# model = load_model('softmax1.h5')
# model.summary()
# x_test = np.array([0, 0])
# x_test = np.reshape(x_test,(1, -1))

# print(model.predict(x_test))

# quantitative_model_weights('softmax1.h5', 'Weights.c')
quantitative_model_weights   // extract the model weights and convert them
Shape_transform              // emit each layer's input/output shape for the downstream C code
Weights_transform            // emit the weight data
|- Kernel_transform          // emit the kernel (W) values
|- Bias_transform            // emit the bias (B) values
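
To tie it together, a minimal usage sketch. The layer count and values below are hypothetical; they depend entirely on whatever model softmax1.h5 contains (the 2-100-50-10-2 network from the commented-out example would give LAYERS_NUM 4):

quantitative_model_weights('softmax1.h5', 'Weights.c')

# The head of the generated Weights.c then looks roughly like:
#   #include <stdio.h>
#
#   // exported model parameters
#
#   #define LAYERS_NUM 4
#
#   #define W1_TEMP_SIZE 100
#   #define W2_TEMP_SIZE 50
#   ...
#   double W1[2][100] = {
#   {    0.123456 ,   -0.654321 , ... },
#   ...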
