python利用pytorch库导出图像分割算子(含labelme标注转mask、Flask服务与训练脚本)

import os
import json
import base64
import os.path as osp
import numpy as np
from PIL import Image
from labelme import utils

# 限制类别个数小于255
# lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode="P")
# colormap = imgviz.label_colormap()
# lbl_pil.putpalette(colormap.flatten())
# lbl_pil.save(filename)
class Json2Ana:
    """Convert labelme .json annotation files into paired image/label PNGs.

    Class names discovered across files accumulate in
    ``label_name_to_value`` (background is always class 0).
    """

    def __init__(self, index):
        # Index of the image currently being processed (progress logging only).
        self.index = index
        # Background class is always present with value 0.
        self.label_name_to_value = {'_background_': 0}
        self.class_num = {'class_num': 1}
        # Fixed: the original literal repeated the 'height' key twice,
        # so 'width' was silently missing.
        self.resize_wh = {'height': 512, 'width': 512}

    def Analysis(self, file_path, resize_img=None, show_label_flag=True):
        """Scan *file_path* for labelme .json files and export image/label PNGs.

        resize_img: optional (width, height); when given, image and label are
            resized before saving (labels with NEAREST to keep class ids intact).
        show_label_flag: also save a color-mapped "*_show.png" label preview.

        NOTE(review): output folders come from the module globals ``seg_image``
        and ``seg_label`` — this only works when the script runs as __main__;
        confirm before reusing as a library.
        """
        for entry in os.listdir(file_path):
            if os.path.splitext(entry)[1] != '.json':
                continue
            file_name, _ = os.path.splitext(entry)
            path = os.path.join(file_path, entry)
            # Fixed: close the file handle deterministically (was json.load(open(...))).
            with open(path) as jf:
                data = json.load(jf)

            # Skip files containing shapes with an invalid number of points.
            bad_shape = False
            for shape in data['shapes']:
                shape_type = shape['shape_type']
                points = shape['points']
                if (shape_type in ("circle", "rectangle", "line") and len(points) != 2) \
                        or (shape_type == "point" and len(points) != 1) \
                        or (shape_type == "polygon" and len(points) < 3):
                    bad_shape = True
            if bad_shape:
                continue

            out_dir = osp.basename(path).replace('.', '_')
            out_dir = osp.join(osp.dirname(path), out_dir)
            # Read the referenced image and decode it to a numpy array.
            # Fixed: the original joined with the global ``data_path``; the image
            # referenced by a json lives relative to that json's folder,
            # i.e. ``file_path`` (matters with multi_files=True).
            imagePath = os.path.join(file_path, data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = base64.b64encode(f.read()).decode('utf-8')
            img = utils.img_b64_to_arr(imageData)

            # Register any class names not seen before.
            for shape in data['shapes']:
                label_name = shape['label']
                if label_name not in self.label_name_to_value:
                    label_value = len(self.label_name_to_value)
                    self.label_name_to_value[label_name] = label_value
                    print('Found new class name:{}/ value: {}'.format(label_name, label_value))
            # Rasterize the shapes into a class-id mask.
            lbl, _ = utils.shapes_to_label(img.shape, data['shapes'], self.label_name_to_value)

            # Create output folders on first use (race-free vs. exists+mkdir).
            os.makedirs(seg_image, exist_ok=True)
            os.makedirs(seg_label, exist_ok=True)

            if resize_img is not None:
                # Fixed: np.ndarray has no PIL-style resize(); go through PIL.
                # NEAREST for the label so class ids are never interpolated.
                img = np.asarray(Image.fromarray(img.astype(np.uint8)).resize(resize_img))
                lbl = np.asarray(Image.fromarray(lbl.astype(np.uint8)).resize(resize_img, Image.NEAREST))
            # Save the image as RGB. Fixed: the resize branch originally forced
            # palette mode "P", which fails for 3-channel arrays.
            Image.fromarray(img.astype(np.uint8)).save(
                osp.join(seg_image, '%s.png' % file_name))
            if show_label_flag:
                utils.lblsave(osp.join(seg_label, '%s_show.png' % file_name), lbl)  # preview label
            lbl_pil = Image.fromarray(lbl.astype(np.uint8), mode="P")
            lbl_pil.save(osp.join(seg_label, '%s.png' % file_name))  # training label
            print('[Index: %d]==>Finished Saved : %s' % (self.index, out_dir))
            self.index += 1

    def Info_print(self):
        """Print the accumulated label map and the total class count."""
        print(self.label_name_to_value)
        print('Total class_nums: %d' % (len(self.label_name_to_value)))

    def main(self, path, multi_files=False, resize_img=None, param_path="./params.log", show_label_flag=True):
        """Entry point: process one folder of jsons, or each sub-folder.

        With multi_files=False a metadata json is also written to *param_path*.
        """
        if multi_files:
            for sub in os.listdir(path):
                self.Analysis(osp.join(path, sub), resize_img=resize_img, show_label_flag=show_label_flag)
        else:
            self.Analysis(file_path=path, resize_img=resize_img, show_label_flag=show_label_flag)
            # Persist dataset metadata for downstream consumers.
            # NOTE(review): class_num/width/height and both paths are
            # hard-coded — confirm they match the actual dataset.
            dataset_info_json = {'name_label_dict':self.label_name_to_value,'class_num':2,'width':512,"height":512,"image_path":r"D:\dataset\Test\train\annotations\images","label_path":r"D:\dataset\Test\train\annotations\labels"}
            data2json = json.dumps(dataset_info_json)
            with open(param_path, "w") as f:
                f.write(data2json)
            print(data2json)



if __name__ == '__main__':
    # Reference normalization constants (unused here):
    # norm_mean = [0.485, 0.456, 0.406]
    # norm_std = [0.229, 0.224, 0.225]
    # Folder containing the labelme .json files.
    data_path = r'D:\dataset\Test\train\annotations'
    # Output folder for the mask (label) PNGs.
    seg_label = os.path.join(data_path, 'labels')
    # Output folder for the image PNGs.
    seg_image = os.path.join(data_path, 'images')

    Json = Json2Ana(index=0)
    Json.main(path=data_path, multi_files=False, resize_img=None,param_path = "./params.json",show_label_flag=False)
    Json.Info_print()

from flask import Flask, request,url_for
from flask_restful import Api,Resource,reqparse

'''
生成Flask生成实例
'''
app = Flask(__name__)  # the WSGI application instance
api = Api(app)  # flask-restful router bound to the app

'''
测试连通
'''
class TestForGetView(Resource):
    """Connectivity check: GET / returns a fixed greeting."""

    def get(self):
        return {"res": "hello world"}


'''
测试接受json数据
'''
class TestForGetJSON(Resource):
    """Parse source/operation/destination from the request and echo them."""

    def get(self):
        # 1. Declare the accepted fields and parse the request.
        parser = reqparse.RequestParser()
        parser.add_argument('source')
        parser.add_argument('operation')
        parser.add_argument('destination')
        parsed = parser.parse_args()
        # 2. Trace what arrived.
        print(parsed)
        print(parsed.source)
        print(parsed.operation)
        print(parsed.destination)
        # 3. Return the parsed namespace (flask-restful serializes to JSON).
        return parsed


'''
测试接受文件
'''
class TestForPostFile(Resource):
    """Upload smoke test: dump the raw request body to 11.pdf."""

    def post(self):
        payload = request.get_data()
        with open("11.pdf", "wb") as out:
            out.write(payload)
        return 'OK'


'''
图像分类
'''
class PredictForClassificationView(Resource):
    """Image-classification endpoint stub: saves the posted bytes to disk."""

    def post(self):
        payload = request.get_data()
        with open("test_for_classification.png", "wb") as out:
            out.write(payload)
        return "OK"


'''
图像分割
'''
class PredictForSegementationView(Resource):
    """Image-segmentation endpoint stub: saves the posted bytes to disk."""

    def post(self):
        payload = request.get_data()
        with open("test_for_segementation.png", "wb") as out:
            out.write(payload)
        return 'OK'


'''
图像检测
'''
class PredictForDetectionView(Resource):
    """Object-detection endpoint stub: saves the posted bytes to disk."""

    def post(self):
        payload = request.get_data()
        with open("test_for_detection.jpg", "wb") as out:
            out.write(payload)
        return 'OK'


'''
注册API
'''
# Route registration: bind each Resource class to its URL.
api.add_resource(TestForGetView,'/')
api.add_resource(TestForGetJSON,'/test_for_json')
api.add_resource(TestForPostFile,'/test_for_postfile')
api.add_resource(PredictForClassificationView,'/predict_for_classification')
api.add_resource(PredictForSegementationView,'/predict_for_segementation')
api.add_resource(PredictForDetectionView,'/predict_for_detection')


if __name__ == "__main__":
    # Start the Flask development server, local-only on port 5000.
    app.run(host="127.0.0.1",port=5000)



using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Diagnostics;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;

namespace Test4Python
{
    public partial class Form1 : Form
    {
        public Form1()
        {
            InitializeComponent();
        }

        /// <summary>
        /// 训练: launches the Python script in a child process and shows its
        /// stdout (and stderr, if any) when it finishes.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button1_Click(object sender, EventArgs e)
        {
            try
            {
                // Spawn the Python interpreter on the target script.
                Process p = new Process();
                string filePath = @"D:\workplace\python\MINIST-master\algo\inference.py";
                p.StartInfo.FileName = @"D:\software\Anaconda3\python.exe"; // interpreter to launch
                p.StartInfo.Arguments = filePath;                           // script path passed as argument
                p.StartInfo.UseShellExecute = false;                        // required for stream redirection
                p.StartInfo.RedirectStandardOutput = true;                  // capture stdout
                p.StartInfo.RedirectStandardInput = true;                   // allow writing to stdin
                p.StartInfo.RedirectStandardError = true;                   // capture stderr

                p.StartInfo.CreateNoWindow = true;                          // no console window
                p.StartInfo.StandardErrorEncoding = Encoding.UTF8;          // keep non-ASCII output readable

                p.Start();
                // Fixed: removed the arbitrary Thread.Sleep(10000). ReadToEnd
                // already blocks until the child closes stdout, and draining the
                // streams BEFORE WaitForExit avoids pipe-buffer deadlocks.
                string output = p.StandardOutput.ReadToEnd();
                string errors = p.StandardError.ReadToEnd();
                p.WaitForExit();
                p.Close();

                MessageBox.Show(output);
                // Fixed: stderr was redirected but never read — surface it.
                if (!string.IsNullOrEmpty(errors))
                {
                    MessageBox.Show("报错信息:" + errors);
                }

                MessageBox.Show("执行完毕");

            }
            catch (Exception ex)
            {
                MessageBox.Show("报错信息:" + ex.Message);
            }
        }

        /// <summary>
        /// 预测: not implemented yet.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button2_Click(object sender, EventArgs e)
        {

        }
    }
}

import json


def export_task(task_path):
    """Write the task manifest (script and model paths) to *task_path* as JSON."""
    manifest = {
        'train': "./algo/train.py",
        'inference': "./algo/inference.py",
        'model': "./algo/model.pth",
    }
    serialized = json.dumps(manifest)
    with open(task_path, "w") as handle:
        handle.write(serialized)
    print(serialized)

if __name__ == '__main__':
    # NOTE(review): this writes "task.task" while the snippet below reads
    # "task.json" — confirm which filename is intended.
    export_task("task.task")
import json
# Quick check that the exported task manifest round-trips through JSON.
with  open("task.json", 'r') as f:
    text = json.load(f)
    print(text)
    print(text['train'])


导出task,为一些流程语句
将所需资源,放到dll中

资源:python文件、模型权重

软件导出task中包含了该软件的保存的python文件和模型权重
task中包含python文件的路径和权重路径

再给其他语言绑定写相应的API库

其他语言,可以进行参数设置进行训练、评测等等

将task定义为json

json:
1.模型权重路径
2.训练的py路径,该py能根据传参分辨为分类、分割、检测
3.评测的py路径,该py能根据传参分辨为分类、分割、检测


中间语言是python,各语言调用python进行训练评测等等

外端写各个语言的dll

java 对应的jar包
c# 对应的dll库
c++ 对应的dll库

from __future__ import division
from tkinter import Tk,Label,Entry,IntVar,Button,Text,INSERT
from tkinter.messagebox import showinfo
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from datetime import datetime
import pandas as pd
import os
import cv2
import torchvision.transforms.functional as ff
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
import six

# Prefer the GPU when available, else fall back to CPU.
# NOTE(review): device / norm_mean / norm_std are re-defined identically in
# the configuration section below — these early copies look redundant.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
norm_mean = [0.485, 0.456, 0.406]  # standard ImageNet channel means
norm_std = [0.229, 0.224, 0.225]   # standard ImageNet channel stds



'''
二 配置参数
'''
# 1. batchsize: mini-batch size for the training and validation loaders
batchsize = 1

# 2. num_epoch: number of training epochs (200 is the usual default here)
num_epoch = 10

# 3. num_classes: number of segmentation classes (including background)
num_classes = 2

# 4. crop_size: (H, W) crop used for label buffers and center-cropping
crop_size = (512, 512)  #  (512,512)

# 5. Training-set image and label folders
train_image = r".\liver\train\image"  # r'./major_dataset_repo/major_collected_dataset/train/image'
train_label = r'.\liver\train\mask'

# 6. Validation-set image and label folders
val_image = r".\liver\valid\image"
val_label = r'.\liver\valid\mask'

# 7. Test-set image and label folders
test_image = r".\liver\test\image"
test_label = r'.\liver\test\mask'

# 8. Raw dataset folders to be split into train/valid/test
dataset_image = r'./liver/Images'
dataset_label = r'./liver/ImagesPNG'

# 9. path_test_model: checkpoint loaded for testing
path_test_model = r'.\best_model.pth'

# 10. path_predict_model: checkpoint loaded for inference/visualization
path_predict_model = r'.\best_model.pth'

# 11. Where the best model is saved during training
path_saved_model = r'.\best_model.pth'

# 12. color2class_table: CSV mapping RGB colors to class ids
path_color2class_table = r".\color2class_table.csv"

# 13. Compute device (duplicates the definition near the imports above)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# 14. (norm_mean, norm_std): dataset normalization mean/std
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
# Run validation every `val_interval` epochs
val_interval = 10

'''
三 图像数据处理
'''
def access_raw_label(frame):
    """Convert an RGB label image into a single-channel class-id map.

    Reads the color→class table from ``path_color2class_table`` (CSV with the
    class id in column 0 and R,G,B values in columns 2 onward), then replaces
    each pixel's RGB triple with its class id.

    Args:
        frame: H x W x 3 array of RGB values.

    Returns:
        H x W int array of class ids. Pixels whose color is not in the table
        keep whatever ``np.empty`` left there (unchanged from the original
        behavior).
    """
    #  Build the color -> class-id lookup once per call.
    dataframe = pd.read_csv(path_color2class_table)
    dict_color2class = {}
    for i in range(len(dataframe)):
        # Fixed: plain int tuples as keys instead of str(list(...)) — robust
        # against numpy/pandas repr differences, and O(1) lookup per pixel
        # instead of scanning every table entry (the original inner loop).
        rgb = tuple(int(v) for v in dataframe.iloc[i, 2:])
        class_id = int(dataframe.iloc[i, 0])
        dict_color2class[rgb] = class_id

    # Fixed: size the output from the input frame instead of the global
    # crop_size — the original raised IndexError for frames larger than
    # crop_size and mis-sized the result for smaller ones.
    height = frame.shape[0]
    width = frame.shape[1]
    label = np.empty([height, width], dtype=int)

    # Map every pixel's color to its class id.
    for row in range(height):
        for col in range(width):
            key = tuple(int(v) for v in frame[row, col])
            if key in dict_color2class:
                label[row, col] = dict_color2class[key]

    return label


class LoadDataset(Dataset):
    """Paired image/label dataset read from two parallel folders.

    file_path: two-element list — [image_dir, label_dir].
    crop_size: (H, W) used by the optional center-crop helper.
    """

    def __init__(self, file_path=[], crop_size=None):
        # 1 Require exactly [image_dir, label_dir].
        if len(file_path) != 2:
            raise ValueError("同时需要图片和标签文件夹的路径,图片路径在前")
        self.img_path = file_path[0]
        self.label_path = file_path[1]
        # 2 Sorted file lists; sorting keeps image[i] aligned with label[i].
        self.imgs = self.read_file(self.img_path)
        self.labels = self.read_file(self.label_path)
        # 3 Remember the crop size for the (currently unused) center crop.
        self.crop_size = crop_size

    def __getitem__(self, index):
        # Sorted lists mean the same index addresses matching image/label.
        img_file = self.imgs[index]
        print(img_file)
        label_file = self.labels[index]
        # Load both PNGs; convert OpenCV's BGR ordering to RGB.
        img = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2RGB)
        label = cv2.cvtColor(cv2.imread(label_file), cv2.COLOR_BGR2RGB)
        # Center-crop step intentionally disabled, as in the original:
        # img, label = self.center_crop(img, label, self.crop_size)
        img, label = self.img_transform(img, label)
        return {'img': img, 'label': label}

    def __len__(self):
        return len(self.imgs)

    def read_file(self, path):
        """Return the sorted full paths of every file under *path*."""
        return sorted(os.path.join(path, name) for name in os.listdir(path))

    def center_crop(self, data, label, crop_size):
        """Center-crop image and label to the same size."""
        return ff.center_crop(data, crop_size), ff.center_crop(label, crop_size)

    def img_transform(self, img, label):
        """Normalize the image to a tensor; map the RGB label to class ids."""
        to_tensor = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(norm_mean, norm_std),
            ]
        )
        img = to_tensor(img)

        # 3-channel color label -> 1-channel class-id map, as a long tensor.
        label = access_raw_label(label)
        label = torch.from_numpy(label)
        label = label.long()

        return img, label


'''
四 评测指标计算
'''
def calc_semantic_segmentation_confusion(pred_labels, gt_labels):
    """Accumulate a confusion matrix over iterables of 2-D label maps.

    Starts at the module-level ``num_classes`` size and grows dynamically
    when larger label ids appear. Entry [i, j] counts pixels with ground
    truth i predicted as j.

    Raises:
        ValueError: on non-2D inputs, shape mismatch, or unequal lengths.
    """
    pred_labels = iter(pred_labels)
    gt_labels = iter(gt_labels)

    n_class = num_classes
    confusion = np.zeros((n_class, n_class), dtype=np.int64)
    # Fixed: builtin zip replaces the unnecessary six.moves.zip — this file
    # is Python-3 only (f-strings/torch elsewhere), so six adds nothing.
    for pred_label, gt_label in zip(pred_labels, gt_labels):
        if pred_label.ndim != 2 or gt_label.ndim != 2:
            raise ValueError('ndim of labels should be two.')
        if pred_label.shape != gt_label.shape:
            raise ValueError('Shape of ground truth and prediction should'
                             ' be same.')
        pred_label = pred_label.flatten()
        gt_label = gt_label.flatten()

        # Grow the confusion matrix when a label id exceeds the current size.
        lb_max = np.max((pred_label, gt_label))
        if lb_max >= n_class:
            expanded_confusion = np.zeros(
                (lb_max + 1, lb_max + 1), dtype=np.int64)
            expanded_confusion[0:n_class, 0:n_class] = confusion

            n_class = lb_max + 1
            confusion = expanded_confusion

        # n_class * gt + pred maps each (gt, pred) pair to a unique bincount
        # bin, so one bincount+reshape fills the whole matrix.
        mask = gt_label >= 0
        confusion += np.bincount(
            n_class * gt_label[mask].astype(int) + pred_label[mask],
            minlength=n_class ** 2)\
            .reshape((n_class, n_class))

    # Both iterators must now be exhausted, i.e. inputs had equal length.
    for iter_ in (pred_labels, gt_labels):
        # This code assumes any iterator does not contain None as its items.
        if next(iter_, None) is not None:
            raise ValueError('Length of input iterables need to be same')

    return confusion

# PA (overall pixel accuracy)
def Pixel_Accuracy(confusion_matrix):
    """Fraction of all pixels classified correctly: trace / total."""
    correct = np.diag(confusion_matrix).sum()
    total = confusion_matrix.sum()
    return correct / total

# MPA (mean per-class pixel accuracy)
def Pixel_Accuracy_Class(confusion_matrix):
    """Per-class recall averaged over classes; NaN (absent) classes skipped."""
    per_class = np.diag(confusion_matrix) / confusion_matrix.sum(axis=1)
    return np.nanmean(per_class)

# MIoU (mean intersection over union)
def Mean_Intersection_over_Union(confusion_matrix):
    """IoU per class = TP / (row + col - TP), averaged with NaNs skipped."""
    tp = np.diag(confusion_matrix)
    union = np.sum(confusion_matrix, axis=1) + np.sum(confusion_matrix, axis=0) - tp
    return np.nanmean(tp / union)

# FWIoU (frequency-weighted intersection over union)
def Frequency_Weighted_Intersection_over_Union(confusion_matrix):
    """Per-class IoU weighted by that class's ground-truth frequency."""
    gt_freq = np.sum(confusion_matrix, axis=1) / np.sum(confusion_matrix)
    tp = np.diag(confusion_matrix)
    iou = tp / (np.sum(confusion_matrix, axis=1) +
                np.sum(confusion_matrix, axis=0) - tp)
    present = gt_freq > 0
    return (gt_freq[present] * iou[present]).sum()


def eval_semantic_segmentation(pred_labels, gt_labels):
    """Compute PA/MPA/MIoU/FWIoU from predicted and ground-truth label maps."""
    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    return {
            'pa': Pixel_Accuracy(confusion),
            "mpa": Pixel_Accuracy_Class(confusion),
            'miou': Mean_Intersection_over_Union(confusion),
            'fwiou': Frequency_Weighted_Intersection_over_Union(confusion),
            }


'''
五 网络模型
'''
class UNet(nn.Module):
    """U-Net encoder/decoder network for semantic segmentation.

    Four encoder stages (channel width doubles per stage), a bottleneck, and
    four decoder stages with skip connections; a final 1x1 conv produces one
    score map per class, passed through a sigmoid.

    Args:
        in_channels: channels of the input image (3 = RGB).
        num_classes: number of output score maps.
        init_features: channel width of the first encoder stage.
    """

    def __init__(self, in_channels=3, num_classes=1, init_features=32):
        super(UNet, self).__init__()

        features = init_features
        # Encoder: conv block followed by 2x2 max-pool at each stage.
        self.encoder1 = UNet._block(in_channels, features, name="enc1")
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder2 = UNet._block(features, features * 2, name="enc2")
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder3 = UNet._block(features * 2, features * 4, name="enc3")
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.encoder4 = UNet._block(features * 4, features * 8, name="enc4")
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.bottleneck = UNet._block(features * 8, features * 16, name="bottleneck")

        # Decoder: transpose-conv upsampling; each block takes 2x channels
        # because the matching encoder output is concatenated in (skip link).
        self.upconv4 = nn.ConvTranspose2d(
            features * 16, features * 8, kernel_size=2, stride=2
        )
        self.decoder4 = UNet._block((features * 8) * 2, features * 8, name="dec4")
        self.upconv3 = nn.ConvTranspose2d(
            features * 8, features * 4, kernel_size=2, stride=2
        )
        self.decoder3 = UNet._block((features * 4) * 2, features * 4, name="dec3")
        self.upconv2 = nn.ConvTranspose2d(
            features * 4, features * 2, kernel_size=2, stride=2
        )
        self.decoder2 = UNet._block((features * 2) * 2, features * 2, name="dec2")
        self.upconv1 = nn.ConvTranspose2d(
            features * 2, features, kernel_size=2, stride=2
        )
        self.decoder1 = UNet._block(features * 2, features, name="dec1")

        # 1x1 conv: per-pixel class scores.
        self.conv = nn.Conv2d(
            in_channels=features, out_channels=num_classes, kernel_size=1
        )

    def forward(self, x):
        # Encoder
        enc1 = self.encoder1(x);#print('enc1:', enc1.size())
        enc2 = self.encoder2(self.pool1(enc1));#print('enc2:', enc2.size())
        enc3 = self.encoder3(self.pool2(enc2));#print('enc3:', enc3.size())
        enc4 = self.encoder4(self.pool3(enc3));#print('enc4:', enc4.size())

       # bottleneck
        bottleneck = self.bottleneck(self.pool4(enc4));#print('bottleneck:', bottleneck.size())

       # Decoder (each stage: upsample, concat skip connection, conv block)
        dec4 = self.upconv4(bottleneck);#print('dec4:', dec4.size())
        dec4 = torch.cat((dec4, enc4), dim=1);#print('dec4:', dec4.size())  # skip connection
        dec4 = self.decoder4(dec4);#print('dec4:', dec4.size())

        dec3 = self.upconv3(dec4);#print('dec3:', dec3.size())
        dec3 = torch.cat((dec3, enc3), dim=1);#print('dec3:', dec3.size())
        dec3 = self.decoder3(dec3);#print('dec3:', dec3.size())

        dec2 = self.upconv2(dec3);#print('dec2:', dec2.size())
        dec2 = torch.cat((dec2, enc2), dim=1);#print('dec2:', dec2.size())
        dec2 = self.decoder2(dec2);#print('dec2:', dec2.size())

        dec1 = self.upconv1(dec2);#print('dec1:', dec1.size())
        dec1 = torch.cat((dec1, enc1), dim=1);#print('dec1:', dec1.size())
        dec1 = self.decoder1(dec1);#print('dec1:', dec1.size())

        # NOTE(review): sigmoid output here is combined with log_softmax +
        # NLLLoss in the training loop below — confirm that is intended.
        return torch.sigmoid(self.conv(dec1))

    @staticmethod
    def _block(in_channels, features, name):
        # Two (conv3x3 -> BN -> ReLU) layers; bias disabled because BN follows.
        return nn.Sequential(
            OrderedDict(
                [
                    (
                        name + "conv1",
                        nn.Conv2d(
                            in_channels=in_channels, # input depth of the conv kernels
                            out_channels=features, # number of output feature maps
                            kernel_size=3,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (name + "norm1", nn.BatchNorm2d(num_features=features)),
                    (name + "relu1", nn.ReLU(inplace=True)),
                    (
                        name + "conv2",
                        nn.Conv2d(
                            in_channels=features,
                            out_channels=features,
                            kernel_size=3,
                            padding=1,
                            bias=False,
                        ),
                    ),
                    (name + "norm2", nn.BatchNorm2d(num_features=features)),
                    (name + "relu2", nn.ReLU(inplace=True)),
                ]
            )
        )














class SegWindow(Tk):
    """Tkinter window with two buttons: train the U-Net, or run prediction.

    Relies entirely on the module-level configuration (paths, crop_size,
    batchsize, device, ...) defined above. The leftover "rename tool"
    attributes and title suggest this window was adapted from another app.
    """

    def __init__(self):
        super().__init__()
        self.title("重命名小软件") # window title
        self.geometry("600x400") # window size
        self.resizable(0,0)  # fixed-size window
        # self.iconbitmap(".//img//student.ico")  # window icon

        #self["bg"] = "RoyalBlue"
        # Leftover state from the renaming tool this window was adapted from;
        # unused by the train/predict features.
        self.origin_path = ""
        self.target_path = ""
        self.rename_way_string = "test"
        self.files_path = []
        self.new_name = []

        # Build the widgets.
        self.setup_ui()


    def setup_ui(self):
        """Create the progress-log text box and the train / predict buttons."""
        # # label:原路径
        # self.Label_origin_path = Label(self,text="重命名文件夹路径:")
        # self.Label_origin_path.pack()
        #
        # # label:目标路径
        # self.Label_target_path = Label(self,text="重命名文件夹存放路径:")
        # self.Label_target_path.pack()
        #
        # # 重命名方式
        # self.Label_rename_way = Label(self,text="重命名方式:")
        # self.Label_rename_way.pack()

        # Text widget used as a training progress log.
        # NOTE(review): both buttons are assigned to the same attribute name,
        # so only the second stays referenced on the instance.
        self.richText = Text(self)
        self.richText.pack()
        self.Button_rename_copy_otherdir = Button(self,text="训练",command =self.start_train)
        self.Button_rename_copy_otherdir.pack(side="left")
        self.Button_rename_copy_otherdir = Button(self,text="预测",command =self.predict_img)
        self.Button_rename_copy_otherdir.pack(side="right")

    def predict_img(self):

        '''
        Run the trained model over the dataset and display predicted masks.

        NOTE(review): loads train_image/train_label rather than the test set,
        and the final addImage call references ``test_img_paths``, which is
        not defined anywhere in this file — it will raise NameError.
        '''
        # Build the data loader.
        Load_test = LoadDataset([train_image, train_label], crop_size)
        test_data = DataLoader(Load_test, batch_size=1)
        # Build the model and put it in inference mode on the target device.
        net = UNet(in_channels=3, num_classes=2)
        net.eval()  # inference mode (no BN/dropout updates)
        net.to(device)  # move to the configured device
        # Load the trained weights.
        net.load_state_dict(torch.load(path_predict_model))
        # Load the color/class correspondence table.
        color2class_table = pd.read_csv(path_color2class_table)

        # predict
        def addImage(img1_path, img2_path):
            # Alpha-blend two images and display the result in a cv2 window.
            img1 = cv2.imread(img1_path)
            img = cv2.imread(img2_path)
            h, w, _ = img1.shape
            # Both images must share the same size before blending.
            img2 = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)
            # print img1.shape, img2.shape
            # alpha/beta/gamma control the blend weights
            alpha = 0.7
            beta = 1 - alpha
            gamma = 0
            img_add = cv2.addWeighted(img1, alpha, img2, beta, gamma)
            cv2.namedWindow('addImage')
            cv2.imshow('img_add', img_add)
            cv2.waitKey()
            cv2.destroyAllWindows()

        def pred2show(mask, iii):
            # 1. Read the correspondence table and derive display values.
            dataframe = pd.read_csv(path_color2class_table)
            list_rgb = []
            list_class_id = []
            for i in range(len(dataframe)):
                rgb = list(dataframe.iloc[i][2:])
                class_id = int(dataframe.iloc[i][0])
                list_rgb.append(rgb)
                list_class_id.append(class_id)
            # Display value for class i is i*255 (overwrites the table colors).
            for i in range(len(list_rgb)):
                list_rgb[i] = i * 255
            dict_color2class = dict(zip(list_class_id, list_rgb))

            # 2. Allocate the output image buffer.
            pred = np.empty([crop_size[0], crop_size[1]], dtype=int)
            # mask.shape is (height, width)
            height = mask.shape[0]
            weight = mask.shape[1]

            # 3. Fill each pixel with its class's display value, then show it.
            for row in range(height):  # rows
                for col in range(weight):  # columns
                    pred[row, col] = np.array(dict_color2class[mask[row, col]])
            cv2.imwrite("test" + str(iii) + ".png", pred)
            img_show = cv2.imread("test" + str(iii) + ".png")
            cv2.imshow("test", img_show)
            cv2.waitKey(0)

        # addImage("test.png",r"D:\PycharmProjects\AI_Demo\major_dataset_repo\segementation\WHDLD\train\image\wh0001.jpg")

        for i, sample in enumerate(test_data):
            valImg = sample['img'].to(device)
            out = net(valImg)
            import datetime
            startTime = datetime.datetime.now()
            # Second, timed forward pass (first one warms the model up).
            out = net(valImg)
            endTime = datetime.datetime.now()
            durTime = 'funtion time use:%dms' % (
                        (endTime - startTime).seconds * 1000 + (endTime - startTime).microseconds / 1000)
            print(durTime)

            out = F.log_softmax(out, dim=1)
            # Per-pixel argmax -> predicted class map.
            pre_label = out.max(1)[1].squeeze().cpu().data.numpy()
            # NOTE(review): pred2show returns None, so `rgb` is always None.
            rgb = pred2show(pre_label, i)
            addImage("test" + str(i) + ".png", test_img_paths[i])

    def start_train(self):
        """Train the U-Net and periodically validate, logging to the UI.

        Any exception is caught and surfaced via a message box so the GUI
        does not crash.
        """
        try:
            # ---------------- step 1: data ----------------
            Load_train = LoadDataset([train_image, train_label], crop_size)
            Load_val = LoadDataset([val_image, val_label], crop_size)

            train_data = DataLoader(Load_train, batch_size=batchsize)
            val_data = DataLoader(Load_val, batch_size=batchsize)

            # ---------------- step 2: model ----------------
            net = UNet(in_channels=3, num_classes=2)
            net = net.to(device)

            # ---------------- step 3: loss ----------------
            criterion = nn.NLLLoss().to(device)  # pairs with log_softmax below

            # ---------------- step 4: optimizer ----------------
            optimizer = optim.Adam(net.parameters(), lr=1e-4)

            # ---------------- step 5: training loop ----------------
            for epoch in range(num_epoch):
                '''
                训练
                '''
                # NOTE(review): `best` is re-initialized every epoch, so the
                # best-metric check below is effectively always true.
                best = [0]  # best metric so far (intended for early stopping)
                net = net.train()  # training mode: parameter updates enabled
                print('Epoch is [{}/{}]'.format(epoch + 1, num_epoch))
                self.richText.insert(INSERT,'Epoch is [{}/{}]'.format(epoch + 1, num_epoch))
                # Halve the learning rate every 20 epochs.
                if epoch % 20 == 0 and epoch != 0:
                    for group in optimizer.param_groups:
                        group['lr'] *= 0.5
                # Running metrics for this epoch.
                train_loss = 0
                train_pa = 0
                train_mpa = 0
                train_miou = 0
                train_fwiou = 0
                # Mini-batch loop.
                for i, sample in enumerate(train_data):
                    # Move the batch to the device.
                    img_data = sample['img'].to(device)
                    img_label = sample['label'].to(device)
                    # Forward pass.
                    out = net(img_data)
                    out = F.log_softmax(out, dim=1)
                    loss = criterion(out, img_label)  # compute the loss
                    optimizer.zero_grad()  # clear stale gradients first
                    loss.backward()  # backpropagate
                    optimizer.step()  # update parameters
                    train_loss += loss.item()  # accumulate loss
                    # Evaluate on this training batch:
                    # predictions ([1] selects the argmax indices)
                    pre_label = out.max(dim=1)[1].data.cpu().numpy()
                    pre_label = [i for i in pre_label]
                    # ground truth
                    true_label = img_label.data.cpu().numpy()
                    true_label = [i for i in true_label]
                    # all metrics at once
                    eval_metrix = eval_semantic_segmentation(pre_label, true_label)
                    # accumulate each metric
                    train_pa += eval_metrix['pa']
                    train_mpa += eval_metrix['mpa']
                    train_miou += eval_metrix['miou']
                    train_fwiou += eval_metrix['fwiou']
                    #  print the batch loss
                    print('|batch[{}/{}]|batch_loss {: .8f}|'.format(i + 1, len(train_data), loss.item()))
                #  Format the per-epoch metrics.
                metric_description = '|Train PA|: {:.5f}|\n|Train MPA|: {:.5f}|\n|Train MIou|: {:.5f}|\n|Train FWIou|: {:.5f}|'.format(
                    train_pa / len(train_data),
                    train_mpa / len(train_data),
                    train_miou / len(train_data),
                    train_fwiou / len(train_data),
                )
                #  Print the metrics.
                print(metric_description)
                #  Save the best model according to train MIoU.
                if max(best) <= train_miou / len(train_data):
                    best.append(train_miou / len(train_data))
                    torch.save(net.state_dict(), path_saved_model)

                '''
                验证
                '''
                if (epoch + 1) % val_interval == 0:  # val_interval=1 would validate every epoch
                    net.eval()  # freeze the model for validation
                    eval_loss = 0
                    eval_acc = 0
                    eval_miou = 0
                    eval_class_acc = 0

                    prec_time = datetime.now()
                    for j, sample in enumerate(val_data):
                        valImg = sample['img'].to(device)
                        valLabel = sample['label'].long().to(device)

                        out = net(valImg)
                        out = F.log_softmax(out, dim=1)
                        loss = criterion(out, valLabel)
                        eval_loss = loss.item() + eval_loss
                        pre_label = out.max(dim=1)[1].data.cpu().numpy()
                        pre_label = [i for i in pre_label]

                        true_label = valLabel.data.cpu().numpy()
                        true_label = [i for i in true_label]

                        eval_metrics = eval_semantic_segmentation(pre_label, true_label)
                        eval_acc = eval_metrics['mpa'] + eval_acc
                        eval_miou = eval_metrics['miou'] + eval_miou

                    cur_time = datetime.now()
                    h, remainder = divmod((cur_time - prec_time).seconds, 3600)
                    m, s = divmod(remainder, 60)
                    time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)

                    # NOTE(review): eval_loss is averaged over len(train_data)
                    # rather than len(val_data) — looks like a copy-paste slip.
                    val_str = (
                        '|Valid Loss|: {:.5f} \n|Valid Acc|: {:.5f} \n|Valid Mean IU|: {:.5f} \n|Valid Class Acc|:{:}'.format(
                            eval_loss / len(train_data),
                            0,
                            eval_miou / len(val_data),
                            0))
                    print(val_str)
                    print(time_str)

        except Exception as e:
            showinfo("消息提示", str(e))



if __name__ == '__main__':
    # Launch the GUI and enter the Tk event loop.
    this_win = SegWindow()
    this_win.mainloop()







Winform测试:

// Issue a GET to the local Flask server and show the response body.
HttpWebRequest req = (HttpWebRequest)HttpWebRequest.Create("http://127.0.0.1:5000/");
req.Method = "GET";
using (WebResponse wr = req.GetResponse())
{
    // Read the whole response as text (disposed automatically by `using`).
    using (StreamReader reader = new StreamReader(wr.GetResponseStream()))
    {
        string result = reader.ReadToEnd();
        // System.Console.Write(result);
        MessageBox.Show(result);
    }

}

from io import BytesIO
from LearningDemo.ClsDemo.Cls_Predict import predict as cls_predict
from flask import Flask, request,url_for
from flask_restful import Api,Resource,reqparse
import cv2
from PIL import Image
import base64

'''
Create the Flask application and wrap it with a flask_restful Api.
'''
app = Flask(__name__)
api = Api(app)

'''
测试连通
'''
class TestForGetView(Resource):
    """Health-check endpoint: GET / replies with a fixed greeting."""

    def get(self):
        return {"res": "hello world"}


'''
测试接受json数据
'''
class TestForGetJSON(Resource):
    """Echo endpoint: parses three string fields from the request and returns them."""

    def get(self):
        # 1. Declare and parse the expected request fields.
        parser = reqparse.RequestParser()
        for field in ('source', 'operation', 'destination'):
            parser.add_argument(field)
        args = parser.parse_args()
        # 2. Log what was received.
        print(args)
        print(args.source)
        print(args.operation)
        print(args.destination)
        # 3. Echo the parsed arguments back (serialized as JSON).
        return args


'''
测试接受文件
'''
class TestForPostFile(Resource):
    """Upload endpoint: stores the raw request body as a local file."""

    def post(self):
        payload = request.get_data()
        # NOTE(review): destination name is hard-coded; every upload overwrites it.
        with open("11.pdf", "wb") as out_file:
            out_file.write(payload)
        return 'OK'


'''
图像分类
'''
class PredictForClassificationView(Resource):
    """Image-classification endpoint: body is raw image bytes, reply is the label."""

    def post(self):
        # Decode the raw upload into an RGB PIL image.
        raw = request.get_data()
        image = Image.open(BytesIO(raw)).convert('RGB')
        # Run the classifier and return its label string.
        return cls_predict(image)


'''
图像分割
'''
# class PredictForSegementationView(Resource):
#     def post(self):
#         # 1.读取bytes流
#         file = request.get_data()
#         # 2.bytes流转图像
#         img = Image.open(BytesIO(file)).convert('RGB')
#
#         img_url = r'D:\workplace\python\AI_Demo\test_for_classification.png'
#         with open(img_url, 'rb') as f:
#             base64_data = base64.b64encode(f.read())
#             print(base64_data)
#         return base64_data


@app.route("/predict_for_segementation", methods=['POST'])
def PredictForSegementationAPI():
    """Segmentation endpoint: body is raw image bytes; reply is base64-encoded JPEG.

    Bug fix: the original decoded the uploaded image and then ignored it,
    returning a hard-coded file from disk
    (D:\\workplace\\python\\AI_Demo\\test_for_classification.png) instead.
    The actually-posted image is now re-encoded and returned.
    """
    file = request.get_data()
    # Decode the upload into an RGB PIL image.
    img = Image.open(BytesIO(file)).convert('RGB')
    # Re-encode the received image as JPEG in memory (no temp files, no cv2 round-trip).
    buf = BytesIO()
    img.save(buf, format='JPEG')
    base64_data = base64.b64encode(buf.getvalue())
    return base64_data


'''
图像检测
'''
class PredictForDetectionView(Resource):
    """Detection endpoint: saves the uploaded image bytes to disk and acknowledges."""

    def post(self):
        payload = request.get_data()
        with open("test_for_detection.jpg", "wb") as out_file:
            out_file.write(payload)
        return 'OK'


'''
Register API routes: map each Resource class to its URL path.
'''
api.add_resource(TestForGetView,'/')
api.add_resource(TestForGetJSON,'/test_for_json')
api.add_resource(TestForPostFile,'/test_for_postfile')
api.add_resource(PredictForClassificationView,'/predict_for_classification')
# api.add_resource(PredictForSegementationView,'/predict_for_segementation')
api.add_resource(PredictForDetectionView,'/predict_for_detection')


# Dev entry point: run Flask's built-in server on localhost:5000.
if __name__ == "__main__":
    app.run(host="127.0.0.1",port=5000)




using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Text;
using System.Threading.Tasks;
using System.Drawing;
using System.Diagnostics;
using System.Threading;
using System.Windows.Forms;

namespace Algo
{
    /// <summary>
    /// Thin HTTP client for the local Python (Flask) inference service at 127.0.0.1:5000,
    /// plus helpers to launch the service process and read the training logs.
    /// Fixes over the original: all FileStream/StreamReader/WebResponse objects are
    /// disposed via using, and the whole file is read reliably with File.ReadAllBytes
    /// (the old FileStream.Read call could short-read and leaked on exception).
    /// </summary>
    public class ServiceAPI
    {
        /// <summary>
        /// Starts the Python server script in a hidden child process.
        /// NOTE(review): WaitForExit() blocks the caller until the Python process
        /// terminates, so the "service started" message only appears after the
        /// server exits — confirm this is the intended behavior.
        /// </summary>
        public bool RunApplication()
        {
            try
            {
                string filePath = @"D:\workplace\python\AI_Demo\Server.py"; // argument passed to python.exe
                using (Process p = new Process())
                {
                    p.StartInfo.FileName = @"D:\software\Anaconda3\python.exe"; // interpreter to launch
                    p.StartInfo.Arguments = filePath;
                    p.StartInfo.UseShellExecute = false;               // no shell
                    p.StartInfo.RedirectStandardOutput = true;         // capture stdout
                    p.StartInfo.RedirectStandardInput = true;          // allow sending input
                    p.StartInfo.RedirectStandardError = true;          // capture stderr
                    p.StartInfo.CreateNoWindow = true;                 // run without a console window
                    p.StartInfo.StandardErrorEncoding = Encoding.UTF8; // keep Chinese output readable
                    p.Start();
                    p.WaitForExit();
                }
                MessageBox.Show("服务已启用!");
            }
            catch (Exception ex)
            {
                MessageBox.Show("报错信息:" + ex.Message);
            }

            return true;
        }

        /// <summary>
        /// Sends GET / to the service and returns the response body.
        /// </summary>
        public string TestConnection()
        {
            HttpWebRequest req = (HttpWebRequest)HttpWebRequest.Create("http://127.0.0.1:5000/");
            req.Method = "GET";
            string result = string.Empty;
            using (WebResponse wr = req.GetResponse())
            using (StreamReader reader = new StreamReader(wr.GetResponseStream()))
            {
                result = reader.ReadToEnd();
                System.Console.Write(result);
            }

            return result;
        }

        /// <summary>
        /// POSTs the file at <paramref name="path"/> to <paramref name="url"/> and
        /// returns the response body. Shared implementation for Cls/Seg/Det.
        /// </summary>
        private static string PostFile(string url, string path)
        {
            // Reads the entire file and closes the handle, even on exception.
            byte[] fileContentByte = File.ReadAllBytes(path);

            HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url);
            request.Method = "POST";
            string boundary = "major";
            request.ContentType = "multipart/form-data;boundary=" + boundary;

            // Write the request body.
            using (Stream requestStream = request.GetRequestStream())
            {
                requestStream.Write(fileContentByte, 0, fileContentByte.Length);
            }

            // Send and read the reply.
            using (HttpWebResponse response = (HttpWebResponse)request.GetResponse())
            using (Stream responseStream = response.GetResponseStream())
            using (StreamReader reader = new StreamReader(responseStream, Encoding.GetEncoding("utf-8")))
            {
                string retString = reader.ReadToEnd();
                Console.WriteLine(retString);
                return retString;
            }
        }

        /// <summary>
        /// Image classification: uploads the image and returns the predicted label.
        /// </summary>
        /// <param name="img_path">Path of the image file to classify.</param>
        /// <returns>The service's reply string.</returns>
        public string Cls(string img_path)
        {
            return PostFile("http://127.0.0.1:5000/predict_for_classification", img_path);
        }

        /// <summary>
        /// Image segmentation: uploads the image and decodes the base64 reply into a Bitmap.
        /// </summary>
        /// <param name="img_path">Path of the image file to segment.</param>
        /// <returns>The decoded result image.</returns>
        public Bitmap Seg(string img_path)
        {
            string retString = PostFile("http://127.0.0.1:5000/predict_for_segementation", img_path);
            byte[] bytes = Convert.FromBase64String(retString);
            // The MemoryStream must stay alive for the Bitmap's lifetime (GDI+ requirement),
            // so it is intentionally not disposed here.
            MemoryStream ms = new MemoryStream(bytes);
            return new Bitmap(ms);
        }

        /// <summary>
        /// Image detection: uploads the image and returns the service's reply.
        /// </summary>
        /// <param name="img_path">Path of the image file to run detection on.</param>
        /// <returns>The service's reply string.</returns>
        public string Det(string img_path)
        {
            return PostFile("http://127.0.0.1:5000/predict_for_detection", img_path);
        }

        /// <summary>
        /// Reads a training log file and returns it with HTML line breaks.
        /// FileShare.ReadWrite lets us read while the trainer is still writing.
        /// </summary>
        private static string ReadLogAsHtml(string logPath)
        {
            StringBuilder sb = new StringBuilder();
            using (FileStream fs = new FileStream(logPath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
            using (StreamReader sr = new StreamReader(fs, System.Text.Encoding.UTF8))
            {
                while (!sr.EndOfStream)
                {
                    sb.AppendLine(sr.ReadLine() + "<br>");
                }
            }
            return sb.ToString();
        }

        /// <summary>
        /// Returns the image-classification training log as HTML.
        /// </summary>
        public string TrainCls()
        {
            return ReadLogAsHtml(@"D:\workplace\python\AI_Demo\LearningDemo\ClsDemo\图像分类任务日志.log");
        }

        /// <summary>
        /// Returns the image-segmentation training log as HTML.
        /// </summary>
        public string TrainSeg()
        {
            return ReadLogAsHtml(@"D:\workplace\python\AI_Demo\LearningDemo\SegDemo\图像分割任务日志.log");
        }

        /// <summary>
        /// Returns the image-detection training log as HTML.
        /// </summary>
        public string TrainDet()
        {
            return ReadLogAsHtml(@"D:\workplace\python\AI_Demo\LearningDemo\DetDemo\图像检测任务日志.log");
        }
    }
}


using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Diagnostics;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
using Algo;
namespace Test4Python
{
    /// <summary>
    /// Demo WinForms client: each button exercises one ServiceAPI call
    /// against the local Python inference service.
    /// </summary>
    public partial class Form1 : Form
    {
        public Form1()
        {
            InitializeComponent();
            ServiceAPI serApi = new ServiceAPI();
        }


        /// <summary>
        /// Connection test: launches the Python service process.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button1_Click(object sender, EventArgs e)
        {
            ServiceAPI client = new ServiceAPI();
            if (client.RunApplication())
            {
                MessageBox.Show("成功");
            }
            else
            {
                MessageBox.Show("失败");
            }
        }


        /// <summary>
        /// Classification: uploads a sample image and shows the predicted label.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button2_Click(object sender, EventArgs e)
        {
            ServiceAPI client = new ServiceAPI();
            MessageBox.Show(client.Cls(@"D:\workplace\python\AI_Demo\LearningDemo\ClsDemo\trainData\直边\E\FG-E-10-2.bmp"));
        }


        /// <summary>
        /// Segmentation: uploads a sample image and displays the returned mask.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button3_Click(object sender, EventArgs e)
        {
            ServiceAPI client = new ServiceAPI();
            pictureBox1.Image = client.Seg(@"D:\workplace\python\MINIST-master\test0.png");
        }


        /// <summary>
        /// Detection: uploads a sample image and shows the service reply.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button4_Click(object sender, EventArgs e)
        {
            ServiceAPI client = new ServiceAPI();
            MessageBox.Show(client.Det(@"D:\workplace\python\MINIST-master\test0.png"));
        }


        /// <summary>
        /// Classification training: shows the training log.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button5_Click(object sender, EventArgs e)
        {
            ShowLog(new ServiceAPI().TrainCls());
        }


        /// <summary>
        /// Segmentation training: shows the training log.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button6_Click(object sender, EventArgs e)
        {
            ShowLog(new ServiceAPI().TrainSeg());
        }

        /// <summary>
        /// Detection training: shows the training log.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void button7_Click(object sender, EventArgs e)
        {
            ShowLog(new ServiceAPI().TrainDet());
        }

        /// <summary>
        /// Puts <paramref name="text"/> into the log box and scrolls to the last line.
        /// </summary>
        private void ShowLog(string text)
        {
            richTextBox1.Text = text;
            richTextBox1.SelectionStart = richTextBox1.Text.Length;
            richTextBox1.ScrollToCaret();
        }

        private void textBox1_TextChanged(object sender, EventArgs e)
        {

        }
    }
}

from io import BytesIO
from LearningDemo.ClsDemo.Cls_Predict import predict as cls_predict
from flask import Flask, request,url_for
from flask_restful import Api,Resource,reqparse
import cv2
from PIL import Image
import base64

'''
Create the Flask application and wrap it with a flask_restful Api.
'''
app = Flask(__name__)
api = Api(app)

'''
测试连通
'''
class TestForGetView(Resource):
    """Health-check endpoint: GET / replies with a fixed greeting."""

    def get(self):
        return {"res": "hello world"}


'''
测试接受json数据
'''
class TestForGetJSON(Resource):
    """Echo endpoint: parses three string fields from the request and returns them."""

    def get(self):
        # 1. Declare and parse the expected request fields.
        parser = reqparse.RequestParser()
        for field in ('source', 'operation', 'destination'):
            parser.add_argument(field)
        args = parser.parse_args()
        # 2. Log what was received.
        print(args)
        print(args.source)
        print(args.operation)
        print(args.destination)
        # 3. Echo the parsed arguments back (serialized as JSON).
        return args


'''
测试接受文件
'''
class TestForPostFile(Resource):
    """Upload endpoint: stores the raw request body as a local file."""

    def post(self):
        payload = request.get_data()
        # NOTE(review): destination name is hard-coded; every upload overwrites it.
        with open("11.pdf", "wb") as out_file:
            out_file.write(payload)
        return 'OK'


'''
图像分类
'''
class PredictForClassificationView(Resource):
    """Image-classification endpoint: body is raw image bytes, reply is the label."""

    def post(self):
        # Decode the raw upload into an RGB PIL image.
        raw = request.get_data()
        image = Image.open(BytesIO(raw)).convert('RGB')
        # Run the classifier and return its label string.
        return cls_predict(image)


'''
图像分割
'''
# class PredictForSegementationView(Resource):
#     def post(self):
#         # 1.读取bytes流
#         file = request.get_data()
#         # 2.bytes流转图像
#         img = Image.open(BytesIO(file)).convert('RGB')
#
#         img_url = r'D:\workplace\python\AI_Demo\test_for_classification.png'
#         with open(img_url, 'rb') as f:
#             base64_data = base64.b64encode(f.read())
#             print(base64_data)
#         return base64_data


@app.route("/predict_for_segementation", methods=['POST'])
def PredictForSegementationAPI():
    """Segmentation endpoint: body is raw image bytes; reply is base64-encoded JPEG.

    Bug fix: the original decoded the uploaded image and then ignored it,
    returning a hard-coded file from disk
    (D:\\workplace\\python\\AI_Demo\\test_for_classification.png) instead.
    The actually-posted image is now re-encoded and returned.
    """
    file = request.get_data()
    # Decode the upload into an RGB PIL image.
    img = Image.open(BytesIO(file)).convert('RGB')
    # Re-encode the received image as JPEG in memory (no temp files, no cv2 round-trip).
    buf = BytesIO()
    img.save(buf, format='JPEG')
    base64_data = base64.b64encode(buf.getvalue())
    return base64_data


'''
图像检测
'''
class PredictForDetectionView(Resource):
    """Detection endpoint: saves the uploaded image bytes to disk and acknowledges."""

    def post(self):
        payload = request.get_data()
        with open("test_for_detection.jpg", "wb") as out_file:
            out_file.write(payload)
        return 'OK'


'''
Register API routes: map each Resource class to its URL path.
'''
api.add_resource(TestForGetView,'/')
api.add_resource(TestForGetJSON,'/test_for_json')
api.add_resource(TestForPostFile,'/test_for_postfile')
api.add_resource(PredictForClassificationView,'/predict_for_classification')
# api.add_resource(PredictForSegementationView,'/predict_for_segementation')
api.add_resource(PredictForDetectionView,'/predict_for_detection')




# Dev entry point: Flask debug server on localhost:8083.
if __name__ == '__main__':
    app.run(debug=True, host='127.0.0.1', port=8083)
# coding=utf-8
import platform
# Python 3.8+ defaults to the Proactor event loop on Windows, which lacks the
# selector APIs Tornado relies on; switch to the selector policy there.
if platform.system() == "Windows":
    import asyncio
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from webService import app
import logging
# Timestamp format for log records
DATEFMT = "[%Y-%m-%d %H:%M:%S]"
# Log line format: time, thread id, message
FORMAT = "%(asctime)s %(thread)d %(message)s"
# Global logging configuration (file target is hard-coded)
logging.basicConfig(level=logging.INFO,
                    format=FORMAT,
                    datefmt=DATEFMT,
                    filename=r'D:\workplace\python\AI_Demo\tornado_log.log')


# Entry point: serve the Flask WSGI app through Tornado's HTTPServer on port 5000.
if __name__ == '__main__':
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5000)
    IOLoop.instance().start()


import time
from PIL import Image
import torchvision.transforms as transforms
import torch
import torch.nn as nn


'''
  网络模型
'''
class AlexNet(nn.Module):
    """AlexNet-style CNN: five conv stages, adaptive 6x6 pooling, three FC layers.

    Args:
        num_classes: size of the final classification layer.
        num_linear: flattened feature size feeding the first FC layer.
    """

    def __init__(self, num_classes=1000, num_linear=9216):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor (layer order matters for the RNG
        # and for state_dict keys, so it matches the original exactly).
        conv_stack = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_stack)
        # Pool to a fixed 6x6 map so the FC head is input-size independent.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        head_stack = [
            nn.Dropout(),
            nn.Linear(num_linear, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head_stack)

    def forward(self, x):
        """Run a batch through conv features, pooling, then the classifier head."""
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)

# Index -> class-name mapping for the classifier output
classes = ["A", "B", "C", "D", "E"]
device =  torch.device('cpu')
# Inference checklist:
# 1. model.eval()
# 2. torch.no_grad()
# 3. keep preprocessing identical to training
# 4. time the forward pass
# Per-channel mean/std of the training set (RGB)
norm_mean = [0.33424968,0.33424437, 0.33428448]
norm_std = [0.24796878, 0.24796101, 0.24801227]

# Preprocessing pipeline used at inference time (must match training)
inference_transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

def preprocessing(img, transform=None):
    """Apply *transform* to *img* and return the transformed result.

    Args:
        img: input image (whatever the transform accepts, e.g. a PIL image).
        transform: callable preprocessing pipeline; required.

    Raises:
        Exception: if no transform is supplied.
    """
    # Bug fix: the original tested `transforms` (the imported torchvision
    # module, which is never None) instead of the `transform` argument, so a
    # missing transform crashed with TypeError instead of this explicit error.
    if transform is None:
        raise Exception("无transform进行预处理")
    img_tensor = transform(img)
    return img_tensor


'''
服务API
'''
def predict(img_rgb):
    """Classify a single RGB PIL image and return its class-name string."""
    # 2. model: rebuild the network and load the trained weights onto CPU.
    saved_model_path = r'D:\workplace\python\AI_Demo\LearningDemo\ClsDemo\best_model.pth'
    net = AlexNet(num_classes=5, num_linear=9216)  # swap architecture here if needed, e.g. net = se_resnet50(num_classes=5,pretrained=True)
    net.load_state_dict(torch.load(saved_model_path))
    net.to(device)
    net.eval()
    # 3. single-image prediction
    with torch.no_grad():
        # image -> normalized tensor with a leading batch dimension
        batch = preprocessing(img_rgb, inference_transform).unsqueeze(0).to(device)
        # forward pass -> class scores
        logits = net(batch)
        # argmax over classes -> class name
        _, pred_idx = torch.max(logits, 1)
        return classes[int(pred_idx.data.cpu().numpy())]

if __name__ == "__main__":
    # 1. data: sample image to classify
    img_path = r"D:\workplace\python\AI_Demo\LearningDemo\ClsDemo\split_data\train\E\FG-E-10-1.bmp"
    # 2. model: rebuild the network and load the trained weights
    saved_model_path = './best_model.pth'
    net = AlexNet(num_classes=5, num_linear=9216)  # swap architecture here if needed, e.g. net = se_resnet50(num_classes=5,pretrained=True)
    net.load_state_dict(torch.load(saved_model_path))
    net.to(device)
    net.eval()
    # 3. single-image prediction
    with torch.no_grad():
        # step 1/4 : path --> img
        img_rgb = Image.open(img_path).convert('RGB')

        # step 2/4 : img --> tensor (preprocess + add batch dimension)
        img_tensor = preprocessing(img_rgb,inference_transform)
        img_tensor.unsqueeze_(0)
        img_tensor = img_tensor.to(device)

        # step 3/4 : tensor --> vector (timed forward pass)
        time_start = time.time()
        outputs = net(img_tensor)
        time_end = time.time()

        # step 4/4 : visualization (argmax -> class name)
        print(outputs)
        _,pred_int = torch.max(outputs,1)
        print(pred_int)
        pred_str = classes[int(pred_int.data.cpu().numpy())]
        print(pred_str)




# ============================ 导入工具包包 ============================
import numpy as np
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torch.optim as optim
from matplotlib import pyplot as plt
import os
import random
from PIL import Image
from torch.utils.data import Dataset
import torch
import torch.nn as nn

import os
import logging

# Timestamp format for log records
DATEFMT = "[%Y-%m-%d %H:%M:%S]"
# Log line format: time, thread id, message
FORMAT = "%(asctime)s %(thread)d %(message)s"
# Destination file for the classification training log
CLIENTLOGPATH = os.path.join(r"D:\workplace\python\AI_Demo\LearningDemo\ClsDemo", '图像分类任务日志.log')
# Global logging configuration
logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt=DATEFMT,
                    filename=CLIENTLOGPATH)


'''
  网络模型
'''
class AlexNet(nn.Module):
    """AlexNet-style CNN: five conv stages, adaptive 6x6 pooling, three FC layers.

    Args:
        num_classes: size of the final classification layer.
        num_linear: flattened feature size feeding the first FC layer.
    """

    def __init__(self, num_classes=1000, num_linear=9216):
        super(AlexNet, self).__init__()
        # Layers are created in the original order so random initialization
        # and the numeric Sequential state_dict keys stay identical.
        feature_layers = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*feature_layers)
        # Pool to a fixed 6x6 map so the FC head is input-size independent.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        classifier_layers = [
            nn.Dropout(),
            nn.Linear(num_linear, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*classifier_layers)

    def forward(self, x):
        """Run a batch through conv features, pooling, then the classifier head."""
        out = self.features(x)
        out = self.avgpool(out)
        out = torch.flatten(out, 1)
        return self.classifier(out)



# Class-name -> label-index mapping
dict_label = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4}  # update this if the classification targets change

# Collect every image path together with its class label.
def get_img_info(data_dir, label_map=None):
    """Walk *data_dir* and collect (image_path, class_index) tuples.

    Each sub-directory name is looked up in *label_map* to obtain the class
    index, so the directory layout is ``data_dir/<class_name>/<image>``.

    Args:
        data_dir: root directory whose sub-directories are class names.
        label_map: optional mapping of sub-directory name -> class index;
            defaults to the module-level ``dict_label`` (backward compatible).

    Returns:
        list of (path, int_label) tuples.
    """
    if label_map is None:
        label_map = dict_label  # module-level mapping; update it when classes change
    data_info = list()
    for root, dirs, _ in os.walk(data_dir):
        # one sub-directory per class
        for sub_dir in dirs:
            class_dir = os.path.join(root, sub_dir)
            label = int(label_map[sub_dir])
            # img_names = list(filter(lambda x: x.endswith('.png'), img_names))  # re-enable to filter by extension
            for img_name in os.listdir(class_dir):
                data_info.append((os.path.join(class_dir, img_name), label))
    return data_info

# Index-addressable dataset: maps an index to one (image, label) sample.
class LoadDataset(Dataset):
    def __init__(self, data_dir, transform=None):
        """
        :param data_dir: str, root directory of the dataset
        :param transform: torch.transform, preprocessing applied to each image
        """
        self.label_name = dict_label  # update when the class set changes
        # (path, label) pairs resolved once up front; DataLoader indexes into this.
        self.data_info = get_img_info(data_dir)
        self.transform = transform

    def __getitem__(self, index):
        # Resolve the sample, load it as raw 0-255 RGB, then preprocess.
        path_img, label = self.data_info[index]
        sample = Image.open(path_img).convert('RGB')
        if self.transform is not None:
            sample = self.transform(sample)  # e.g. ToTensor / Normalize
        return sample, label

    def __len__(self):
        return len(self.data_info)




# ============================ 辅助函数 ============================


# ============================ step 0/5 parameter setup ============================
random.seed(1)
# 14. (norm_mean, norm_std): per-channel mean/std of the dataset (RGB)
norm_mean = [0.33424968,0.33424437, 0.33428448]
norm_std = [0.24796878, 0.24796101, 0.24801227]

MAX_EPOCH = 10        # number of training epochs
BATCH_SIZE = 4        # samples per batch
LR = 0.001            # initial learning rate
log_interval = 10     # print training stats every N iterations
val_interval = 1      # validate every N epochs
saved_model_path ='./best_model.pth'
# 5. training image directory
train_image_path = r".\split_data/train"  # r'./major_dataset_repo/major_collected_dataset/train/image'

# 6. validation image directory
val_image_path = r'.\split_data/valid'

# ============================ step 1/5 data ============================
# Training-data preprocessing
train_transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])
# Validation-data preprocessing
valid_transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# Build the training Dataset and DataLoader
train_data = LoadDataset(data_dir=train_image_path, transform=train_transform)
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)  # shuffle samples during training

# Build the validation Dataset and DataLoader
valid_data = LoadDataset(data_dir=val_image_path, transform=valid_transform)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 model ============================
net = AlexNet(num_classes=5,num_linear=9216)  # swap architecture here if needed, e.g. net = se_resnet50(num_classes=5,pretrained=True)
# NOTE(review): training resumes from previously saved weights; this fails if the file is missing.
net.load_state_dict(torch.load(saved_model_path))
# ============================ step 3/5 loss ============================
criterion = nn.CrossEntropyLoss()                                                   # loss function
# ============================ step 4/5 optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)                        # optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)     # LR schedule: decay by gamma every step_size epochs

# ============================ step 5/5 training ============================
train_curve = list()
valid_curve = list()

for epoch in range(MAX_EPOCH):
    loss_mean = 0.
    correct = 0.
    total = 0.
    # incorrect=0.
    net.train()

    #  path_model_state_dict = saved_model_path
    # torch.save(net.state_dict(), path_model_state_dict)
    # print the current learning rate
    print(optimizer.state_dict()['param_groups'][0]['lr'])
    for i, data in enumerate(train_loader):# fetch a batch
        # forward
        inputs, labels = data
        outputs = net(inputs)
        # backward
        optimizer.zero_grad()  # clear gradients before computing the loss
        loss = criterion(outputs, labels)  # loss for this batch
        loss.backward()  # back-propagate
        # update weights
        optimizer.step()  # apply the gradient step
        # classification bookkeeping
        _, predicted = torch.max(outputs.data, 1)  # dim=1 -> argmax over classes
        total += labels.size(0)
        correct += (predicted == labels).squeeze().sum().numpy()  # running count of correct predictions
        loss_mean += loss.item()  # running loss total
        train_curve.append(loss.item())  # training curve, for plotting

        if (i+1) % log_interval == 0:   # log_interval=10: report every 10 iterations
            loss_mean = loss_mean / log_interval  # average loss over the window
            print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
            # write to the logging log file
            logging.info("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
            correct=correct
            total=total   # NOTE(review): these two assignments are no-ops — likely leftovers
            # append the same line to the plain-text training log
            f = open("log_training.txt", 'a')  # 'a' appends, keeping existing content; the file is created if missing
            # (mode could also be 'w+', 'w', 'wb', etc.)
            f.write("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))  # write the formatted line
            # write to the logging log file (NOTE(review): duplicates the logging.info above)
            logging.info("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
            f.write("\n")  # newline
            f.close()
            loss_mean = 0.  # reset the window accumulator

    scheduler.step()  # advance the LR schedule


    # validate the model
    if (epoch+1) % val_interval == 0:  # val_interval=1: validate every epoch
        correct_val = 0. #  correct predictions
        total_val = 0.  # samples seen
        loss_val = 0.  # accumulated loss
        net.eval()  # freeze dropout/batch-norm behaviour for evaluation
        with torch.no_grad():  # no gradients: less memory, faster
            for j, data in enumerate(valid_loader):
                inputs, labels = data
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total_val += labels.size(0)
                correct_val += (predicted == labels).squeeze().sum().numpy()
                loss_val += loss.item()
            valid_curve.append(loss_val/valid_loader.__len__())
            print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, correct_val / total_val))
            # write to the logging log file
            logging.info("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, correct_val / total_val))
            f = open("log_training.txt", 'a')  # 'a' appends, keeping existing content; the file is created if missing
            # (mode could also be 'w+', 'w', 'wb', etc.)
            f.write("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, correct_val / total_val))  # write the formatted line
            # write to the logging log file (NOTE(review): duplicates the logging.info above)
            logging.info("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, correct_val / total_val))
            f.write("\n")  # newline
            f.close()

# x-axis: one point per training iteration
train_x = range(len(train_curve))
train_y = train_curve

train_iters = len(train_loader)
# validation loss is recorded per epoch; rescale epoch indices to iteration counts
valid_x = np.arange(1, len(valid_curve)+1) * train_iters*val_interval
valid_y = valid_curve

plt.plot(train_x, train_y, label='Train')
plt.plot(valid_x, valid_y, label='Valid')

plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()




Windows安装supervisor

pip install git+https://github.com/alexsilva/supervisor@windows -U

https://www.jianshu.com/p/43b755ad027e


import os
import json
import base64
import os.path as osp
import numpy as np
from PIL import Image
from labelme import utils

# directory containing the labelme JSON annotation files
data_path = r'D:\dataset\Test\train\annotations'
# output directory for the label-mask PNGs
seg_label = os.path.join(data_path, 'labels')
# output directory for the image PNGs
seg_image = os.path.join(data_path, 'images')


class Json2Ana:
    """Convert labelme JSON annotations into paired image / label-mask PNGs.

    For every ``*.json`` file in a directory it loads the referenced image,
    rasterizes the annotated shapes into an integer label mask, and writes
    three PNGs: the image (under ``seg_image``), a colorized preview of the
    mask and the raw "P"-mode mask (both under ``seg_label``).

    Relies on the module-level ``data_path`` / ``seg_image`` / ``seg_label``
    constants and on ``labelme.utils``.
    """

    # shape_type -> exact point count labelme requires; polygons (>= 3
    # points) are checked separately in _has_invalid_shape.
    _EXACT_POINTS = {'circle': 2, 'rectangle': 2, 'line': 2, 'point': 1}

    def __init__(self, index):
        # Running index of processed images, used only for progress output.
        self.index = index
        # Class name -> integer label; background is always label 0.
        self.label_name_to_value = {'_background_': 0}

    @classmethod
    def _has_invalid_shape(cls, shapes):
        """Return True if any shape has a point count labelme cannot rasterize."""
        for shape in shapes:
            shape_type = shape['shape_type']
            n_points = len(shape['points'])
            expected = cls._EXACT_POINTS.get(shape_type)
            if expected is not None and n_points != expected:
                return True
            if shape_type == "polygon" and n_points < 3:
                return True
        return False

    def _register_labels(self, shapes):
        """Assign the next free integer label to any class name not yet seen."""
        for shape in shapes:
            label_name = shape['label']
            if label_name not in self.label_name_to_value:
                label_value = len(self.label_name_to_value)
                self.label_name_to_value[label_name] = label_value
                print('Found new class name:{}/ value: {}'.format(label_name, label_value))

    def Analysis(self, file_path, resize_img=None):
        """Process every ``*.json`` annotation file found in ``file_path``.

        Args:
            file_path: directory containing labelme JSON files; the images
                they reference are resolved relative to ``data_path``.
            resize_img: optional ``(width, height)`` tuple; when given, both
                the image and the mask are resized before saving (the mask
                with nearest-neighbour resampling so label ids survive).
        """
        for entry in os.listdir(file_path):
            file_name, ext = os.path.splitext(entry)
            if ext != '.json':
                continue
            path = os.path.join(file_path, entry)
            # BUGFIX: the original ``json.load(open(path))`` leaked the
            # file handle; use a context manager instead.
            with open(path) as json_file:
                data = json.load(json_file)

            # Skip annotation files whose shapes cannot be rasterized.
            if self._has_invalid_shape(data['shapes']):
                continue

            out_dir = osp.basename(path).replace('.', '_')
            out_dir = osp.join(osp.dirname(path), out_dir)

            # Read the referenced image and decode it into a numpy array.
            imagePath = os.path.join(data_path, data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = base64.b64encode(f.read()).decode('utf-8')
            img = utils.img_b64_to_arr(imageData)

            self._register_labels(data['shapes'])
            # Rasterize all shapes into an integer label mask.
            lbl, _ = utils.shapes_to_label(img.shape, data['shapes'], self.label_name_to_value)

            # Create the output directories on first use (idempotent).
            os.makedirs(seg_image, exist_ok=True)
            os.makedirs(seg_label, exist_ok=True)

            img_pil = Image.fromarray(img.astype(np.uint8))
            lbl_pil = Image.fromarray(lbl.astype(np.uint8), mode="P")
            if resize_img is not None:
                # BUGFIX: the original called ``.resize`` on numpy arrays —
                # ``np.ndarray.resize`` mutates in place and returns None,
                # crashing the following ``.astype`` — and then saved the
                # RGB image with mode="P". Resize through PIL instead, with
                # NEAREST for the mask so label values are not interpolated.
                img_pil = img_pil.resize(resize_img)
                lbl_pil = lbl_pil.resize(resize_img, resample=Image.NEAREST)

            img_pil.save(osp.join(seg_image, '%s.png' % file_name))  # image
            # Colorized preview of the (possibly resized) mask.
            utils.lblsave(osp.join(seg_label, '%s_show.png' % file_name), np.asarray(lbl_pil))
            lbl_pil.save(osp.join(seg_label, '%s.png' % file_name))  # raw label mask

            print('[Index: %d]==>Finished Saved : %s' % (self.index, out_dir))
            self.index += 1

    def Info_print(self):
        """Print the accumulated class-name mapping and the total class count."""
        print(self.label_name_to_value)
        print('Total class_nums: %d' % (len(self.label_name_to_value)))

    def main(self, path, multi_files=False, resize_img=None, param_path="./params.log"):
        """Entry point: run ``Analysis`` and persist the label mapping.

        Args:
            path: a directory of JSON files, or — when ``multi_files`` is
                True — a directory whose sub-directories each hold JSON files.
            multi_files: process every sub-directory of ``path`` when True.
            resize_img: forwarded to ``Analysis``.
            param_path: destination file for the label mapping (JSON).
        """
        if multi_files:
            for sub in os.listdir(path):
                self.Analysis(osp.join(path, sub), resize_img=resize_img)
        else:
            self.Analysis(file_path=path, resize_img=resize_img)
        # BUGFIX: the original wrote the mapping only in the single-directory
        # branch; persist it in both cases so multi-directory runs keep it too.
        data2json = json.dumps(self.label_name_to_value)
        with open(param_path, "w") as f:
            f.write(data2json)
        print(data2json)



if __name__ == '__main__':
    # Convert every annotation under data_path, then report the classes found.
    converter = Json2Ana(index=0)
    converter.main(path=data_path, multi_files=False, resize_img=None, param_path="./params.log")
    converter.Info_print()

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值