halcon深度学习语义分割步骤

1. 最近查看 Halcon 深度学习语义分割的学习 demo,感觉步骤有点复杂,在此整理一下。Halcon 版本:20.05。

* --- Environment setup ---
* Disable automatic window updates while the program runs.
dev_update_off()
* Draw regions as outlines only (do not fill them).
dev_set_draw('margin')
* Get the handle of the currently active graphics window.
dev_get_window(WindowHandle)
* Fixing the seed means re-running the script produces the same
* split of DLDataset.
SeedRand := 42
* Set the random seed.
set_system ('seed_rand', SeedRand)

************************** Data preprocessing *****************************
* Directory containing the images for semantic segmentation.
segment_image_path := 'D:/Source/SourceMe/image3/segmentationJob/Segment/segment_images'
* Directory containing the label (ground-truth) images.
segment_lable_path := 'D:/Source/SourceMe/image3/segmentationJob/Segment/segment_labels'

* Class names.
segment_class_names := ['ok', 'ng']
* Class ids (gray values in the label images).
segment_class_ids := [0, 1]

* Read the dataset from disk into the DLDataset dictionary.
read_dl_dataset_segmentation (segment_image_path, segment_lable_path, segment_class_names, segment_class_ids, [], [], [], DLDataset)
* Split the dataset: 80% training, 20% validation, remainder (0%) test.
split_dl_dataset (DLDataset, 80, 20, [])
* Preprocessing parameters: 640x640 images, 3 channels,
* gray value range [-127, 128], no normalization, 'full_domain' handling.
create_dl_preprocess_param ('segmentation', 640, 640, 3, -127, 128, 'none', 'full_domain', [], [], [], [], DLPreprocessParam)

* Generic parameters for preprocess_dl_dataset.
create_dict (GenParam)
set_dict_tuple (GenParam, 'overwrite_files', 'auto')

* Directory where the preprocessed dataset is written.
preprocess_dataset_save_path := 'D:/Source/SourceMe/image3/segmentationJob/PreprocessDataset'
preprocess_dl_dataset (DLDataset, preprocess_dataset_save_path, DLPreprocessParam, GenParam, DLDatasetFilename)

* Store the preprocessing parameters separately; they are read again
* in the inference section. (The 'paramter' spelling is kept because
* the variable name is reused later in the script.)
preprocess_paramter_save_path:=preprocess_dataset_save_path+'/dl_preprocess_param.hdict'
write_dict (DLPreprocessParam, preprocess_paramter_save_path, [], [])

*** Preview the preprocessed dataset ***
* Get the samples of the dataset.
get_dict_tuple (DLDataset, 'samples', DatasetSamples)
* Select the samples belonging to the training split.
find_dl_samples (DatasetSamples, 'split', 'train', 'match', SampleIndices)
* Shuffle the sample indices.
tuple_shuffle (SampleIndices, ShuffledIndices)
* Dictionary that keeps the visualization window handles.
create_dict (WindowHandleDict)
* Display the first ten shuffled training samples.
for Index := 0 to 9 by 1
    * Read one preprocessed sample from disk.
    read_dl_samples (DLDataset, ShuffledIndices[Index], DLSampleBatchDisplay)
    * Show the image together with its ground-truth segmentation.
    dev_display_dl_data (DLSampleBatchDisplay, [], DLDataset, ['image', 'segmentation_image_ground_truth'], [], WindowHandleDict)
    get_dict_tuple (WindowHandleDict, 'segmentation_image_ground_truth', WindowHandleImage)
    * NOTE(review): assumes at least two window handles exist under
    * this key — verify against dev_display_dl_data's output.
    dev_set_window (WindowHandleImage[1])
    Text := 'Press Run (F5) to continue'
    dev_disp_text (Text, 'window', 400, 40, 'black', [], [])
    stop ()
endfor
* Close the windows used for the visualization.
dev_close_window_dict (WindowHandleDict)
stop()

*********************************** Training **********************************
* Query the available DL devices (GPU preferred, CPU as fallback).
query_available_dl_devices (['runtime', 'runtime'], ['gpu', 'cpu'], DLDeviceHandles)
if (|DLDeviceHandles| == 0)
    throw ('No supported device found to continue this example.')
endif
* Due to the filter used in query_available_dl_devices,
* the first device is a GPU if one is available.
DLDevice := DLDeviceHandles[0]
get_dl_device_param (DLDevice, 'type', DLDeviceType)
if (DLDeviceType == 'cpu')
    * The number of used threads may have an impact
    * on the training duration.
    NumThreadsTraining := 4
    set_system ('thread_num', NumThreadsTraining)
endif

* Path of the preprocessed dataset file written above.
preprocess_file_path:=preprocess_dataset_save_path+'/dl_dataset.hdict'
* Read the preprocessed dataset.
read_dict (preprocess_file_path, [], [], DLDataset)
* Name (or path) of the pretrained segmentation model.
model_file_name_or_path := 'pretrained_dl_segmentation_enhanced.hdl'
* Read the model.
read_dl_model (model_file_name_or_path, DLModelHandle)
* Set the device used for training.
set_dl_model_param (DLModelHandle, 'device', DLDevice)
* Get the preprocessing parameters stored in the dataset.
get_dict_tuple (DLDataset, 'preprocess_param', DLPreprocessParam)
* Get the class ids of the segmentation dataset.
get_dict_tuple (DLDataset, 'class_ids', ClassIDs)
* Adapt the model to the preprocessing parameters and class ids.
set_dl_model_param_based_on_preprocessing (DLModelHandle, DLPreprocessParam, ClassIDs)
* Set the learning rate.
set_dl_model_param (DLModelHandle, 'learning_rate', 0.0001)
* The momentum usually stays at the default 0.9;
* for small batch sizes a high momentum is recommended.
*set_dl_model_param (DLModelHandle, 'momentum', 0.9)
* Set the batch size.
set_dl_model_param (DLModelHandle, 'batch_size', 1)
* Optionally initialize the runtime immediately.
*set_dl_model_param (DLModelHandle, 'runtime_init', 'immediately')

* Generic training parameters (name/value pairs).
GenParamName := []
GenParamValue := []

* Serialization entry for the best model seen during training.
create_dict(best_model_save_path_dict)
set_dict_tuple(best_model_save_path_dict, 'type', 'best')
* Basename under which the best intermediate model is stored.
best_model_save_path:=preprocess_dataset_save_path+'/best_dl_model_segment'
set_dict_tuple(best_model_save_path_dict, 'basename', best_model_save_path)
GenParamName := [GenParamName,'serialize']
GenParamValue := [GenParamValue,best_model_save_path_dict]

* Serialization entry for the final model after training.
create_dict(final_model_save_path_dict)
set_dict_tuple(final_model_save_path_dict, 'type', 'final')
* Basename under which the final model is stored.
final_model_save_path:=preprocess_dataset_save_path+'/final_dl_model_segment'
set_dict_tuple(final_model_save_path_dict, 'basename', final_model_save_path)
GenParamName := [GenParamName,'serialize']
GenParamValue := [GenParamValue,final_model_save_path_dict]

* Create the training parameter dictionary:
* 2 epochs, evaluation interval 1 epoch, display enabled, seed 42.
create_dl_train_param (DLModelHandle, 2, 1, true, 42, GenParamName, GenParamValue, TrainParam)
* Start the training (start epoch 0.0).
train_dl_model (DLDataset, DLModelHandle, TrainParam, 0.0, TrainResults, TrainInfos, EvaluationInfos)
*
* Stop after the training before closing the windows.
dev_disp_text ('Press Run (F5) to continue', 'window', 'bottom', 'right', 'black', [], [])
stop ()
* Close the two windows opened by the training visualization.
dev_close_window()
dev_close_window()

****************************** Evaluation ***********************
* Re-read the best model produced by the training.
read_dl_model (best_model_save_path, DLModelHandle)
* Set the batch size for the evaluation.
set_dl_model_param(DLModelHandle, 'batch_size', 1)
* Set the device used for the evaluation.
set_dl_model_param(DLModelHandle, 'device', DLDevice)
* Read the preprocessed dataset.
read_dict (preprocess_file_path, [], [], DLDataset)
* Measures to be computed during the evaluation.
SegmentationMeasures := ['mean_iou', 'pixel_accuracy', 'class_pixel_accuracy', 'pixel_confusion_matrix']
create_dict (GenParamEval)
set_dict_tuple (GenParamEval, 'measures', SegmentationMeasures)
set_dict_tuple (GenParamEval, 'show_progress', 'true')
* Evaluate the model on the validation split.
evaluate_dl_model (DLDataset, DLModelHandle, 'split', ['validation'], GenParamEval, EvaluationResult, EvalParams)
* Display the evaluation results.
create_dict (WindowHandleDict)
create_dict (GenParamEvalDisplay)
set_dict_tuple (GenParamEvalDisplay, 'display_mode', ['measures', 'absolute_confusion_matrix'])
dev_display_segmentation_evaluation (EvaluationResult, EvalParams, GenParamEvalDisplay, WindowHandleDict)
dev_disp_text ('Press Run (F5) to continue', 'window', 'bottom', 'right', 'black', 'box', 'true')
stop ()
* Close the evaluation windows.
dev_close_window_dict (WindowHandleDict)
* Get the samples of the dataset.
get_dict_tuple (DLDataset, 'samples', DatasetSamples)
* Select the samples of the validation split.
find_dl_samples (DatasetSamples, 'split', ['validation'], 'match', SampleIndices)
* Shuffle the sample indices.
tuple_shuffle (SampleIndices, ShuffledIndices)

* Dictionary for the visualization window handles.
create_dict (WindowHandleDict)
* Display parameters: hide class id 0 ('ok') in the overlay and
* set the overlay transparency.
create_dict (GenParamDisplay)
set_dict_tuple (GenParamDisplay, 'segmentation_exclude_class_ids', 0)
set_dict_tuple (GenParamDisplay, 'segmentation_transparency', '80')
* Display the first ten shuffled validation samples.
for Index:=0 to 9 by 1
    * Read one preprocessed sample.
    read_dl_samples (DLDataset, ShuffledIndices[Index], DLSampleBatch)
    * Run inference on the sample.
    apply_dl_model (DLModelHandle, DLSampleBatch, [], DLResult)
    * Show the ground truth next to the predicted segmentation.
    dev_display_dl_data (DLSampleBatch, DLResult, DLDataset, ['segmentation_image_ground_truth', 'segmentation_image_result'], GenParamDisplay, WindowHandleDict)
    dev_display_continue_message (WindowHandleDict)
    stop ()
endfor
* Close the visualization windows.
dev_close_window_dict (WindowHandleDict)
* Optimize the model for inference and store it again
* under the same path.
set_dl_model_param (DLModelHandle, 'optimize_for_inference', 'true')
write_dl_model (DLModelHandle, best_model_save_path)
stop()

******************************* Inference ************************************
* Read the (inference-optimized) best model.
read_dl_model (best_model_save_path, DLModelHandle)
* Get the class names and ids from the model.
get_dl_model_param (DLModelHandle, 'class_names', ClassNames)
get_dl_model_param (DLModelHandle, 'class_ids', ClassIDs)
* Set the inference batch size.
set_dl_model_param (DLModelHandle, 'batch_size', 1)
* Set the inference device.
set_dl_model_param (DLModelHandle, 'device', DLDevice)
* Read the preprocessing parameters stored during preprocessing.
read_dict (preprocess_paramter_save_path, [], [], DLPreprocessParam)
stop()

* Read a single test image.
read_image (ImageBatch, 'D:/Source/SourceMe/image3/segmentationJob/Segment/segment_images/3.jpg')
* Wrap the image into DL sample dictionaries.
gen_dl_samples_from_images (ImageBatch, DLSampleBatch)
* Preprocess the sample exactly like the training data.
preprocess_dl_samples (DLSampleBatch, DLPreprocessParam)
* Run inference; request segmentation image and confidences.
apply_dl_model (DLModelHandle, DLSampleBatch, ['segmentation_image', 'segmentation_confidence'], DLResultBatch)
* Get the preprocessed input image (batch size 1, so the tuple
* consists of a single dict handle).
get_dict_object (Image, DLSampleBatch, 'image')
* Get the predicted segmentation image (pixel value = class id).
get_dict_object (SegmentationImage, DLResultBatch, 'segmentation_image')
* Threshold per class id to obtain one region per class.
threshold(SegmentationImage, ClassRegions, ClassIDs, ClassIDs)
count_obj(ClassRegions, Number)
* Display the image and the class regions.
dev_display(Image)
dev_display(ClassRegions)
* NOTE(review): this assumes Number exceeds 1 only when 'ng'
* (class id 1) pixels exist — verify, since threshold may also
* return empty regions for class ids without pixels.
if (Number>1)
    dev_disp_text('Ng', 'image', 100, 100, 'red', 'box', 'false')
else
    dev_disp_text('Ok', 'image', 100, 100, 'green', 'box', 'false')
endif
stop ()










  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
Halcon深度学习语义分割是指使用Halcon软件进行图像处理和分析,通过深度学习算法对图像进行语义分割,即将图像中的每个像素分配到不同的类别中。语义分割可以用于目标检测、图像分割、医学影像分析等领域。 在Halcon中进行深度学习语义分割,需要进行一系列的预处理操作。预处理的目的是设置图像预处理的参数,并生成语义分割的文件,用于后续的训练。预处理的参数包括图像的宽度、高度、通道数,训练模式(CPU或GPU),训练集与验证集的比例等。预处理的代码量可能较多,但大部分是用于介绍和设置图像信息的,实际上只需要有标注好的文件和设置好的图像信息,就可以完成图像预处理。 具体操作步骤如下: 1. 使用Everything软件或全盘搜索,找到文件segment_pill_defects_deep_learning_1_preprocess.hdev,该文件路径应该在HALCON安装目录下的examples\hdevelop\Deep-Learning\Segmentation文件夹中。 2. 返回到该目录下找到images文件夹,并将生成的test1_images文件夹拷贝到images文件夹下。确保test1_images文件夹与pill在同级目录下。 3. 在images文件夹中找到labels文件夹,并将生成的test1_labels文件夹放入其中。同样,确保test1_labels文件夹与pill在同级目录下。 4. 修改代码中相应的名称,并根据需要注释部分代码。 5. 完成以上步骤后,即可进行Halcon深度学习语义分割操作。 请注意,以上是一般的操作步骤,具体的操作可能会因个人需求和实际情况而有所不同。如果有任何问题或错误,请及时指正。 #### 引用 - *1* *2* [Halcon 深度学习语义分割 预处理 案例解析](https://blog.csdn.net/zhuifengyx/article/details/127538927) - *3* [Halcon深度学习---语义分割(1)---数据集预处理](https://blog.csdn.net/WDX4092410/article/details/131213087)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值