Training a deep learning model requires a dataset. First, prepare a folder for the captured images (mine is named TD) and create two subfolders inside it: one for the images and one for the labels. (Readers familiar with label creation will know how to make the label images; since many readers may not be, I will cover it in a separate article.)
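As a quick sanity check (only a sketch; the folder names TD/image and TD/labels are the ones used later in this article), you can list both folders and confirm that every image has a label image with the same file name, since read_dl_dataset_segmentation pairs images and label images by name:
list_files ('TD/image', ['files', 'follow_links'], ImageFiles)
list_files ('TD/labels', ['files', 'follow_links'], LabelFiles)
* The two tuples should have the same length and matching base file names.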
The next step is to create the network model.
* Input image dimensions.
InputImageWidth := 400
InputImageHeight := 400
InputImageDepth := 3
BatchSize := 16
*
* Create the input layers.
create_dl_layer_input ('segmentation_image_target', [InputImageWidth,InputImageHeight,1], [], [], DLLayerImageTarget)
create_dl_layer_class_id_conversion (DLLayerImageTarget, 'target', 'from_class_id', [], [], DLLayerTarget)
create_dl_layer_input ('image', [InputImageWidth,InputImageHeight,InputImageDepth], [], [], DLLayerInput)
create_dl_layer_input ('weight_image', [InputImageWidth,InputImageHeight,1], [], [], DLLayerWeights)
*
* Create the core of the network.
* Note that this exemplary network is very small and might not produce
* satisfying results, depending on your problem.
* You can create any network architecture here that fits your needs.
create_dl_layer_convolution (DLLayerInput, 'convolution1', 5, 1, 1, 3, 1, 'none', 'relu', [], [], DLLayerConvolution1)
create_dl_layer_batch_normalization (DLLayerConvolution1, 'batchnorm1', 0.9, 0.0001, 'none', [], [], DLLayerBatchNorm1)
create_dl_layer_pooling (DLLayerBatchNorm1, 'pooling1', [2, 2], [2, 2], 'implicit', 'maximum', [], [], DLLayerPooling1)
create_dl_layer_convolution (DLLayerPooling1, 'convolution2', 5, 1, 1, 3, 1, 'none', 'relu', 'bias_filler_variance_norm', 'norm_out', DLLayerConvolution2)
create_dl_layer_batch_normalization (DLLayerConvolution2, 'batchnorm2', 0.9, 0.0001, 'none', [], [], DLLayerBatchNorm2)
create_dl_layer_pooling (DLLayerBatchNorm2, 'pooling2', [2, 2], [2, 2], 'implicit', 'maximum', [], [], DLLayerPooling2)
create_dl_layer_zoom_to_layer_size (DLLayerPooling2, DLLayerInput, 'zoom', 'bilinear', 'false', [], [], DLLayerZoom)
*
* Add a softmax and a loss layer to enable training.
create_dl_layer_softmax (DLLayerZoom, 'softmax', [], [], DLLayerSoftMax)
create_dl_layer_loss_cross_entropy (DLLayerSoftMax, DLLayerTarget, DLLayerWeights, 'loss_cross_entropy', 1, [], [], DLLayerLossCrossEntropy)
*
* Create a segmentation image with class ids.
create_dl_layer_depth_max (DLLayerSoftMax, 'segmentation_image_softmax', 'argmax', [], [], DLLayerSegmentationSoftMax, _)
create_dl_layer_class_id_conversion (DLLayerSegmentationSoftMax, 'segmentation_image', 'to_class_id', [], [], DLLayerSegmentationImage)
*
* Create a segmentation confidence image.
create_dl_layer_depth_max (DLLayerSoftMax, 'segmentation_confidence', 'value', [], [], _, DLLayerSegmentationConfidence)
*
* Create the model by passing over a list of output layers.
* The created model then contains all layers that directly
* or indirectly feed into these output layers.
create_dl_model ([DLLayerLossCrossEntropy,DLLayerSegmentationImage,DLLayerSegmentationConfidence], DLModelHandle)
set_dl_model_param (DLModelHandle, 'type', 'segmentation')
* Press Run (F5) to continue.
stop ()
*
* Part 2: Set model parameters and inspect the model.
set_dl_model_param (DLModelHandle, 'batch_size', BatchSize)
get_dl_model_param (DLModelHandle, 'summary', NetworkSummary)
*
* Part 2.1: Inspect the model using its summary.
dev_inspect_ctrl (NetworkSummary)
stop ()
*
* Part 2.2: Inspect the model using its model handle.
dev_close_inspect_ctrl (NetworkSummary)
*
dev_inspect_ctrl (DLModelHandle)
stop ()
*
*
* Part 3 (optional): Add meta data to better specify the model later.
* Press Run (F5) to continue.
dev_close_inspect_ctrl (DLModelHandle)
*
MetaData := dict{}
MetaData.model_creator := 'MVTec Software GmbH'
MetaData.model_info := 'Segmentation model with BatchNorm layer'
set_dl_model_param (DLModelHandle, 'meta_data', MetaData)
write_dl_model (DLModelHandle, 'dl_model_segmentation_small.hdl')
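As a small check (only a sketch; DLModelHandleRestored and MetaDataRestored are illustrative variable names), the model written above can be read back and its meta data inspected:
read_dl_model ('dl_model_segmentation_small.hdl', DLModelHandleRestored)
get_dl_model_param (DLModelHandleRestored, 'meta_data', MetaDataRestored)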
Readers who have studied the model architecture in depth can freely modify this model-creation step; if you only want to apply it, you can leave it largely unchanged. The next step is organizing the data, i.e., preprocessing.
* Directory with the input images.
ImageDir := 'TD/image'
* Directory with ground truth segmentation images.
SegmentationDir := 'TD/labels'
*
* All example data is written to this folder.
ExampleDataDir := 'segment_TD_defects_data'
* Dataset directory basename for any outputs written by preprocess_dl_dataset.
* This name will be extended by the image dimensions the dataset will have after preprocessing.
DataDirectoryBaseName := ExampleDataDir + '/dldataset_TD_'
* Store preprocess parameters separately in order to use it, e.g., during inference.
PreprocessParamFileBaseName := '/dl_preprocess_param.hdict'
*
* *************************
* *** Set parameters ***
* *************************
*
* Class names.
* In the label images, the regions of each class are marked with the
* corresponding class value (the source images here are 429x300 pixels).
ClassNames := ['No', 'Yes']
* Class IDs.
ClassIDs := [0, 1]
*
* Percentages for splitting the dataset.
TrainingPercent := 70
ValidationPercent := 15
*
* Some models may have limitations regarding the image properties,
* see read_dl_model () for the model of your choice.
* Image dimensions the images are rescaled to during preprocessing.
ImageWidth := 400
ImageHeight := 400
ImageNumChannels := 3
*
* Gray value range for gray value normalization of the images.
ImageRangeMin := -127
ImageRangeMax := 128
*
* Further parameters for image preprocessing.
NormalizationType := 'none'
DomainHandling := 'full_domain'
IgnoreClassIDs := []
SetBackgroundID := []
ClassIDsBackground := []
*
* In order to get a reproducible split we set a random seed.
* This means that re-running the script results in the same split of DLDataset.
SeedRand := 42
*
* ****************************************************************************
* ** Read the labeled data and split it into train/validation and test ***
* ****************************************************************************
* Set the random seed.
set_system ('seed_rand', SeedRand)
*
* Read the dataset.
read_dl_dataset_segmentation (ImageDir, SegmentationDir, ClassNames, ClassIDs, [], [], [], DLDataset)
*
* Generate the split.
split_dl_dataset (DLDataset, TrainingPercent, ValidationPercent, [])
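If you want to verify the split, here is a small sketch (TrainIndices, ValidationIndices, and TestIndices are illustrative variable names) using the same find_dl_samples procedure as in the preview further below:
find_dl_samples (DLDataset.samples, 'split', 'train', 'match', TrainIndices)
find_dl_samples (DLDataset.samples, 'split', 'validation', 'match', ValidationIndices)
find_dl_samples (DLDataset.samples, 'split', 'test', 'match', TestIndices)
* |TrainIndices|, |ValidationIndices|, and |TestIndices| should roughly follow the 70/15/15 split.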
*
* *********************************
* ** Preprocess the dataset ***
* *********************************
*
* Create the output directory if it does not exist yet.
file_exists (ExampleDataDir, FileExists)
if (not FileExists)
make_dir (ExampleDataDir)
endif
*
* Create preprocess param.
create_dl_preprocess_param ('segmentation', ImageWidth, ImageHeight, ImageNumChannels, ImageRangeMin, ImageRangeMax, NormalizationType, DomainHandling, IgnoreClassIDs, SetBackgroundID, ClassIDsBackground, [], DLPreprocessParam)
*
* Dataset directory for any outputs written by preprocess_dl_dataset.
DataDirectory := DataDirectoryBaseName + ImageWidth + 'x' + ImageHeight
*
* Preprocess the dataset. This might take a few minutes.
GenParam := dict{overwrite_files: 'auto'}
preprocess_dl_dataset (DLDataset, DataDirectory, DLPreprocessParam, GenParam, DLDatasetFilename)
*
* Store preprocess parameters separately in order to use it, e.g., during inference.
PreprocessParamFile := DataDirectory + PreprocessParamFileBaseName
write_dict (DLPreprocessParam, PreprocessParamFile, [], [])
*
* *******************************************
* ** Preview the preprocessed dataset ***
* *******************************************
*
* Before moving on to training, it is recommended to check the preprocessed dataset.
*
* Display the DLSamples for 10 randomly selected train images.
find_dl_samples (DLDataset.samples, 'split', 'train', 'match', SampleIndices)
tuple_shuffle (SampleIndices, ShuffledIndices)
read_dl_samples (DLDataset, ShuffledIndices[0:9], DLSampleBatchDisplay)
*
WindowHandleDict := dict{}
for Index := 0 to |DLSampleBatchDisplay| - 1 by 1
* Loop over samples in DLSampleBatchDisplay.
dev_display_dl_data (DLSampleBatchDisplay[Index], [], DLDataset, ['image', 'segmentation_image_ground_truth'], [], WindowHandleDict)
dev_set_window (WindowHandleDict.segmentation_image_ground_truth[1])
Text := 'Press Run (F5) to continue'
dev_disp_text (Text, 'window', 400, 40, 'black', [], [])
stop ()
endfor
*
* Close windows that have been used for visualization.
dev_close_window_dict (WindowHandleDict)
Preprocessing generates the folder segment_TD_defects_data, which stores the preprocessed (organized) data; it is loaded when training the model. Next comes training the model.
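For orientation, the generated directory typically looks like this (the samples subfolder is where preprocess_dl_dataset usually writes the individual preprocessed samples; check the exact layout on your system):
segment_TD_defects_data/
  dldataset_TD_400x400/
    dl_dataset.hdict
    dl_preprocess_param.hdict
    samples/  (one .hdict file per preprocessed sample)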
* Folder with the training data generated by the preprocessing step above.
ExampleDataDir := 'segment_TD_defects_data'
DataDirectory := ExampleDataDir + '/dldataset_TD_400x400'
DLDatasetFileName := DataDirectory + '/dl_dataset.hdict'
* Path where the final trained model is written.
FinalModelBaseName := ExampleDataDir + '/final_dl_model_segmentation'
* Path where the best model found during evaluation is written.
BestModelBaseName := ExampleDataDir + '/best_dl_model_segmentation'
* The segmentation model to be retrained.
ModelFileName := 'pretrained_dl_segmentation_enhanced.hdl'
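The code below uses DLDevice and DLDeviceType, which this snippet never defines. A minimal sketch of the usual device query (variable names as in the MVTec example programs) would be:
query_available_dl_devices (['runtime', 'runtime'], ['gpu', 'cpu'], DLDeviceHandles)
if (|DLDeviceHandles| == 0)
throw ('No supported device found to continue this example.')
endif
* Due to the filter used above, the first device is a GPU if one is available.
DLDevice := DLDeviceHandles[0]
get_dl_device_param (DLDevice, 'type', DLDeviceType)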
* Use the maximum possible batch size on the GPU; set a fixed batch size for CPU training.
if (DLDeviceType == 'gpu')
BatchSize := 'maximum'
else
BatchSize := 64
endif
* Initial learning rate.
InitialLearningRate := 0.0001
* Momentum should be high if batch size is small.
Momentum := 0.95
*
* Parameters used by train_dl_model.
* Number of epochs to train the model.
NumEpochs := 200
* Evaluation interval in epochs; 1 means the model is evaluated after every epoch.
EvaluationIntervalEpochs := 1
* Epochs at which the learning rate is changed; leave empty to keep it constant.
ChangeLearningRateEpochs := []
* Change the learning rate to the following values, e.g., InitialLearningRate * [0.1, 0.01].
* The tuple has to be of the same length as ChangeLearningRateEpochs.
ChangeLearningRateValues := []
WeightPrior := []
*
* Parameters of train_dl_model.
* Control whether training progress is displayed (true/false).
EnableDisplay := true
* Random seed for reproducible training.
RandomSeed := 42
set_system ('seed_rand', RandomSeed)
*
* In order to obtain nearly deterministic training results on the same GPU
* (identical system, driver, and CUDA version), you can set 'cudnn_deterministic'
* to 'true'. Note that this could slow down the training.
* set_system ('cudnn_deterministic', 'true')
*
* Set generic parameters of create_dl_train_param.
* For an overview of all available parameters, see the documentation of create_dl_train_param.
GenParamName := []
GenParamValue := []
*
* Change strategies.
* It is possible to change model parameters during the training.
* Here, we change the learning rate if specified above.
if (|ChangeLearningRateEpochs| > 0)
ChangeStrategy := dict{}
* Specify the model parameter to be changed.
ChangeStrategy.model_param := 'learning_rate'
* Start the parameter value at 'initial_value'.
ChangeStrategy.initial_value := InitialLearningRate
* Change the parameter value at each 'epochs' step.
ChangeStrategy.epochs := ChangeLearningRateEpochs
* Change the parameter value to the corresponding value in 'values'.
ChangeStrategy.values := ChangeLearningRateValues
* Collect all change strategies as input.
GenParamName := [GenParamName,'change']
GenParamValue := [GenParamValue,ChangeStrategy]
endif
*
* Serialization strategies.
* There are several options for saving intermediate models to disk (see create_dl_train_param).
* Here, the best and final model are saved to the paths set above.
SerializationStrategy := dict{['type']: 'best', basename: BestModelBaseName}
GenParamName := [GenParamName,'serialize']
GenParamValue := [GenParamValue,SerializationStrategy]
SerializationStrategy := dict{['type']: 'final', basename: FinalModelBaseName}
GenParamName := [GenParamName,'serialize']
GenParamValue := [GenParamValue,SerializationStrategy]
*
* Display parameters.
* By default, the evaluation measure for the training samples is not displayed during training.
* If you want to display it, select a percentage of the training samples to be evaluated
* during training; a lower percentage speeds up the evaluation.
SelectedPercentageTrainSamples := 0
* Set the x-axis argument of the training plots.
XAxisLabel := 'epochs'
DisplayParam := dict{}
DisplayParam.selected_percentage_train_samples := SelectedPercentageTrainSamples
DisplayParam.x_axis_label := XAxisLabel
GenParamName := [GenParamName,'display']
GenParamValue := [GenParamValue,DisplayParam]
* Check if all necessary files exist.
check_data_availability (ExampleDataDir, DLDatasetFileName)
*
* Read in the preprocessed dataset file.
read_dict (DLDatasetFileName, [], [], DLDataset)
* Read in the model that was initialized during preprocessing.
read_dl_model (ModelFileName, DLModelHandle)
*
set_dl_model_param (DLModelHandle, 'device', DLDevice)
* Set the model parameters based on the preprocessing parameters.
DLPreprocessParam := DLDataset.preprocess_param
set_dl_model_param_based_on_preprocessing (DLModelHandle, DLPreprocessParam, DLDataset.class_ids)
*
* Set model hyper-parameters as specified above.
set_dl_model_param (DLModelHandle, 'learning_rate', InitialLearningRate)
set_dl_model_param (DLModelHandle, 'momentum', Momentum)
if (BatchSize == 'maximum' and DLDeviceType == 'gpu')
set_dl_model_param_max_gpu_batch_size (DLModelHandle, 100)
else
if (BatchSize == 'maximum')
* Please set a suitable batch size in case of 'cpu'
* training before continuing.
stop ()
endif
set_dl_model_param (DLModelHandle, 'batch_size', BatchSize)
endif
if (|WeightPrior| > 0)
set_dl_model_param (DLModelHandle, 'weight_prior', WeightPrior)
endif
set_dl_model_param (DLModelHandle, 'runtime_init', 'immediately')
create_dl_train_param (DLModelHandle, NumEpochs, EvaluationIntervalEpochs, EnableDisplay, RandomSeed, GenParamName, GenParamValue, TrainParam)
*
* Start the training by calling the training operator
* train_dl_model_batch () within the following procedure.
train_dl_model (DLDataset, DLModelHandle, TrainParam, 0.0, TrainResults, TrainInfos, EvaluationInfos)
*
After even a short period of training, the segmentation result is already visibly better; I will not train to the end here. Readers who are interested can let the training run to completion.
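After training, the best model saved above can be applied to new images. Here is a minimal inference sketch, assuming the file names that follow from the settings above, the DLDevice handle queried earlier, and a hypothetical test image 'TD/image/test.png':
read_dl_model ('segment_TD_defects_data/best_dl_model_segmentation.hdl', DLModelHandleInference)
read_dict ('segment_TD_defects_data/dldataset_TD_400x400/dl_preprocess_param.hdict', [], [], DLPreprocessParam)
set_dl_model_param (DLModelHandleInference, 'device', DLDevice)
set_dl_model_param (DLModelHandleInference, 'batch_size', 1)
read_image (Image, 'TD/image/test.png')
gen_dl_samples_from_images (Image, DLSampleInference)
preprocess_dl_samples (DLSampleInference, DLPreprocessParam)
apply_dl_model (DLModelHandleInference, DLSampleInference, [], DLResultInference)
* DLResultInference[0] contains the keys 'segmentation_image' and 'segmentation_confidence'.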