End-to-End Garbage Classification with Neural Networks

 

Task 1: Dataset Preprocessing

import os
from os.path import join, exists
import random
import numpy as np
from PIL import Image
import shutil
import matplotlib.pyplot as plt

train_lj = "E:\数据\垃圾分类\\train"
test_lj ="E:\数据\垃圾分类\\test"
# 移动后的数据集images路径
SRC_ROOT = '../space/images'

def find_unlabeled():
    """Show images whose file name has no class prefix and ask the user to label them."""
    classes = {"daisy", "dandelion", "rose", "sunflower", "tulip"}
    for root, _, files in os.walk(SRC_ROOT):
        for fname in files:
            prefix = fname.split("_")[0]
            if prefix not in classes:
                img = Image.open(join(root, fname))
                img.show()
                label = input("Enter the class you see in the image: ")
                print(fname, label)
                label_img(fname, label)



def label_img(fname, label):
    """Rename the file so the user-provided label becomes the prefix."""
    new_name = "_".join([label, fname])
    print(new_name)
    os.rename(join(SRC_ROOT, fname), join(SRC_ROOT, new_name))


def regulate_filename():
    """Normalize file names to the pattern 'hn1 <serial> <label>.<ext>'."""
    for root, _, files in os.walk(SRC_ROOT):
        for fname in files:
            wjm, hzm = fname.rsplit(".", 1)   # base name, extension
            labels, xlh = wjm.split("_")      # label, serial number
            new_name = " ".join(["hn1", xlh, labels]) + "." + hzm
            os.rename(join(root, fname), join(root, new_name))

def convert_file():
    """Convert every non-JPEG file to JPEG and remove the original."""
    for root, _, files in os.walk(SRC_ROOT):
        for fname in files:
            ylj = join(root, fname)
            if not fname.endswith('.jpg'):
                img = Image.open(ylj).convert('RGB')  # drop any alpha channel so JPEG saving works
                xlj = join(root, fname.rsplit(".", 1)[0] + '.jpg')
                img.save(xlj)
                img.close()
                os.remove(ylj)


def check_pixel():
    """Report whether each image is grayscale or color (all three RGB channels equal means grayscale)."""
    for root, _, files in os.walk(SRC_ROOT):
        for fname in files:
            im = Image.open(join(root, fname))
            pix = im.convert('RGB')
            width, height = im.size
            color_type = "Grey Image"
            for x in range(width):
                for y in range(height):
                    r, g, b = pix.getpixel((x, y))
                    if not (r == g == b):
                        color_type = 'Color Image'
                        break
                if color_type == 'Color Image':
                    break
            im.close()
            print(fname, color_type)
            # An earlier variant also removed blank images whose grayscale array was entirely 255.


def check_size():
    """Remove images that are 32 pixels or smaller on either side."""
    for root, _, files in os.walk(SRC_ROOT):
        for fname in files:
            fp = join(root, fname)
            img = Image.open(fp)
            width, height = img.size
            img.close()
            # A file-size threshold (e.g. os.path.getsize(fp) < 5 * 1024) could also be used
            # to drop tiny files.
            if (width <= 32) or (height <= 32):
                os.remove(fp)

DST_ROOT = '../space/garbage'


def restruct_folder():
    """Copy each JPEG into a per-class subfolder of DST_ROOT; the class name is taken from the file name."""
    if exists(DST_ROOT):
        shutil.rmtree(DST_ROOT)

    for root, _, files in os.walk(SRC_ROOT):
        for fname in files:
            ylj = join(root, fname)
            lxm = fname.replace(".", " ").split(" ")[-2]  # last token before the extension is the class name
            wjj_lj = join(DST_ROOT, lxm)
            if not exists(wjj_lj):
                os.makedirs(wjj_lj)
            if fname.endswith("jpg"):
                shutil.copyfile(ylj, join(wjj_lj, fname))


FINAL_ROOT = r'E:\数据\花卉分类\task1\space\garbage-final2'
# FINAL_ROOT = '../space/garbage-final'  # relative alternative

def split_dataset():
    """Split the class-structured dataset into train and test sets."""
    split_rate = 0.2
    if exists(FINAL_ROOT):
        shutil.rmtree(FINAL_ROOT)
    train_lj = join(FINAL_ROOT, "train")
    test_lj = join(FINAL_ROOT, "test")
    for root, dirs, files in os.walk(DST_ROOT):
        for dir_name in dirs:
            # Create one subfolder per class under train/ and test/
            if not exists(join(train_lj, dir_name)):
                os.makedirs(join(train_lj, dir_name))
            if not exists(join(test_lj, dir_name)):
                os.makedirs(join(test_lj, dir_name))
            wl_lj = join(root, dir_name)
            images = os.listdir(wl_lj)
            num = len(images)
            # Randomly sample k = num * split_rate file names for the test set
            eval_index = random.sample(images, k=int(num * split_rate))
            for image in images:
                image_path = join(wl_lj, image)
                if image in eval_index:
                    # Sampled images go to the test set
                    shutil.copy(image_path, join(test_lj, dir_name))
                else:
                    # The remaining images go to the training set
                    shutil.copy(image_path, join(train_lj, dir_name))



if __name__ == '__main__':
    find_unlabeled()
    regulate_filename()
    convert_file()
    check_pixel()
    check_size()
    restruct_folder()
    split_dataset()
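After the pipeline has run, a quick sanity check can confirm the split. A minimal sketch, assuming split_dataset() has already populated FINAL_ROOT, that counts the images per class in each split:

def count_split(final_root=FINAL_ROOT):
    # Print the number of images per class in the train and test splits
    for subset in ("train", "test"):
        subset_dir = join(final_root, subset)
        for cls in sorted(os.listdir(subset_dir)):
            print(subset, cls, len(os.listdir(join(subset_dir, cls))))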

Task 2: Image Preprocessing + Neural Network

import random

import cv2
import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt
from torch import nn
from torch.optim.lr_scheduler import StepLR
from torchvision import transforms
from torchvision.datasets import ImageFolder
import torchvision.transforms.functional as F
from PIL import Image, ImageEnhance, ImageDraw

from models import *
from model import resnet10, resnet20, resnet12, resnet6, resnet14

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# ImageNet normalization statistics
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]


## Training function
def train(model, dataloader, loss_fn, optimizer, device, scheduler):
    model.train()
    losses = []
    # Iterate over batches of images and labels to run one training epoch
    for i, (itdata, itlabel) in enumerate(dataloader):
        itdata = itdata.to(device)
        itlabel = itlabel.to(device)
        optimizer.zero_grad()
        outputs = model(itdata)
        loss = loss_fn(outputs, itlabel)
        loss.backward()
        optimizer.step()
        scheduler.step()
        losses.append(loss.cpu().item())
    losses_res = sum(losses) / len(losses)

    print('train process loss {}'.format(losses_res))
    return losses_res


# Validate the model with the PyTorch interfaces below and print the test result
## Test function
def test(model, dataloader, loss_fn, device):
    model.eval()
    losses = []
    # Iterate over batches of images and labels to evaluate the model
    with torch.no_grad():
        for i, (itdata, itlabel) in enumerate(dataloader):
            itdata = itdata.to(device)
            itlabel = itlabel.to(device)
            output = model(itdata)
            loss = loss_fn(output, itlabel)
            losses.append(loss.cpu().item())
    losses_res = sum(losses) / len(losses)
    print('test process loss {}'.format(losses_res))
    return losses_res


## Accuracy computation
def accuracy(model, dataloader, device):
    model.eval()
    outputsprd = []
    outputslbl = []
    # Iterate over batches, collect predictions and labels, then compute the accuracy
    with torch.no_grad():
        for i, (itdata, itlabel) in enumerate(dataloader):
            itdata = itdata.to(device)
            itlabel = itlabel.to(device)
            output = model(itdata)
            outputsprd.append(output.detach().cpu().numpy())
            outputslbl.append(itlabel.detach().cpu().numpy())
    outputsprd = np.concatenate(outputsprd)
    outputslbl = np.concatenate(outputslbl)
    acc = np.sum(np.equal(np.argmax(outputsprd, axis=1), outputslbl))
    return acc / len(outputslbl)
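When the overall accuracy is not informative enough, the same evaluation loop can be broken down per class. A minimal sketch (class_names would typically come from the ImageFolder's .classes attribute; nothing below is called by the training script):

def per_class_accuracy(model, dataloader, device, class_names):
    # Same evaluation loop as accuracy(), but reported per class
    model.eval()
    correct = np.zeros(len(class_names))
    total = np.zeros(len(class_names))
    with torch.no_grad():
        for itdata, itlabel in dataloader:
            preds = model(itdata.to(device)).argmax(dim=1).cpu().numpy()
            labels = itlabel.numpy()
            for p, t in zip(preds, labels):
                total[t] += 1
                correct[t] += int(p == t)
    for name, c, t in zip(class_names, correct, total):
        print("{}: {:.3f}".format(name, c / max(t, 1)))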


class RandomCrop(torch.nn.Module):
    """Randomly crop a PIL image to the given output size."""
    def __init__(self, outputsize=(224, 224)):
        super(RandomCrop, self).__init__()
        self.outputsize = outputsize

    def forward(self, img):
        w, h = img.size
        th, tw = self.outputsize
        i = random.randint(0, h - th)  # top of the crop window
        j = random.randint(0, w - tw)  # left of the crop window
        # Crop a window of the target height/width, not the full image size
        return F.crop(img, i, j, th, tw)


#
class Cutout(nn.Module):
    """Randomly zero out n_holes square patches of side `length` (occlusion augmentation)."""
    def __init__(self, n_holes=8, length=8):
        super().__init__()
        self.n_holes = n_holes
        self.length = length

    def forward(self, img):
        img = np.array(img)
        h = img.shape[0]  # image height
        w = img.shape[1]  # image width
        # img.shape[2] would be the number of channels
        mask = np.ones((h, w), np.float32)

        for n in range(self.n_holes):
            y = np.random.randint(h)
            x = np.random.randint(w)

            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)

            mask[y1: y2, x1: x2] = 0.  # rows, columns

        mask = np.expand_dims(mask, axis=-1)
        img = img * mask
        img = Image.fromarray(np.uint8(img))  # convert the array back to a PIL image

        return img
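Before wiring these transforms into the training pipeline, it helps to eyeball what they produce. A minimal preview sketch ('sample.jpg' is a placeholder for any training image):

def preview_augmentations(image_path='sample.jpg'):
    # Apply the augmentation pipeline a few times and show the results next to the original
    preview = transforms.Compose([
        transforms.Resize((256, 256)),
        RandomCrop((224, 224)),
        transforms.RandomHorizontalFlip(p=0.25),
        transforms.RandomVerticalFlip(p=0.25),
        Cutout(),
    ])
    img = Image.open(image_path).convert('RGB')
    fig, axes = plt.subplots(1, 4, figsize=(12, 3))
    axes[0].imshow(img)
    axes[0].set_title('original')
    for ax in axes[1:]:
        ax.imshow(preview(img))
        ax.set_title('augmented')
    for ax in axes:
        ax.axis('off')
    plt.show()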


def main():
    # Batch size
    BATCH_SIZE = 25
    # Dataset paths
    TRAIN_DATA_DIR = r"E:\数据\垃圾分类\train"
    TEST_DATA_DIR = r"E:\数据\垃圾分类\test"
    ## Data augmentation and preprocessing for the training set:
    # - resize every image to a common size
    # - random crop (the RandomCrop module defined above)
    # - random occlusion (the Cutout module defined above)
    # - convert to a torch Tensor and normalize with the ImageNet statistics
    train_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        RandomCrop((224, 224)),
        # Alternatives that were tried: transforms.RandomResizedCrop(224), transforms.Resize((224, 224))
        transforms.RandomHorizontalFlip(p=0.25),  # flip horizontally with probability 0.25
        transforms.RandomVerticalFlip(p=0.25),    # flip vertically with probability 0.25
        # transforms.ColorJitter(brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),  # brightness/contrast/saturation/hue jitter
        # transforms.RandomGrayscale(p=0.025),    # random grayscale conversion
        Cutout(),                                 # random occlusion
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ])
    ## Test-set preprocessing:
    # - resize to the model input size
    # - convert to a torch Tensor and apply the same ImageNet normalization
    test_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD)
    ])

    # Load the training set with the augmentation pipeline as its transform
    trdataset = ImageFolder(TRAIN_DATA_DIR, transform=train_transform)
    # Load the test set with the plain preprocessing pipeline
    tsdataset = ImageFolder(TEST_DATA_DIR, transform=test_transform)

    ## Wrap the Torch Datasets in DataLoaders
    traindataloader = torch.utils.data.DataLoader(trdataset, shuffle=True, batch_size=BATCH_SIZE)
    testdataloader = torch.utils.data.DataLoader(tsdataset, shuffle=False, batch_size=BATCH_SIZE)


    model = resnet20(5)
    model.to(device)
    print(model)

    # Loss function
    loss_function = nn.CrossEntropyLoss()
    # Learning rate
    LEARNING_RATE = 0.003

    # Optimizer (Adam, Adagrad and weight-decay variants were also tried)
    # optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=0.3)
    optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.90)
    # Learning-rate decay (ReduceLROnPlateau was also tried)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.98)

    ## Number of training epochs
    EPOCHS = 20

    train_losses = []
    test_losses = []
    train_accs = []
    test_accs = []
    # Move the model to the training device
    model.to(device)

    import time
    since = time.time()

    ## Train for the specified number of epochs and record the losses and accuracies
    for ep in range(EPOCHS):
        print("The number of epoch {}".format(ep))
        # One training epoch; returns the mean training loss
        train_loss = train(model, traindataloader, loss_function, optimizer, device, scheduler)
        # Evaluate on the test set; returns the mean test loss
        test_loss = test(model, testdataloader, loss_function, device)
        # Accuracy on the training and test sets
        acc = accuracy(model, traindataloader, device)
        acc1 = accuracy(model, testdataloader, device)
        # Record the losses and accuracies for plotting
        train_losses.append(train_loss)
        test_losses.append(test_loss)
        train_accs.append(acc)
        test_accs.append(acc1)

    print("Coustoming time {}".format(time.time() - since))

    ## Accuracy on the test set
    acc = accuracy(model, testdataloader, device)
    print("The test accuracy {}".format(acc))
    ## Plot the loss curves
    plt.title('Training and Validation Loss')
    # Plot the recorded training and test losses with matplotlib
    trloss, = plt.plot(train_losses, label='Training Loss')
    ttloss, = plt.plot(test_losses, label='Validation Loss')
    plt.legend(handles=[trloss, ttloss])
    plt.show()

    ## Plot the accuracy curves
    plt.title('Train and Test Accuracy')
    plt.plot(train_accs, label='Tra_Acc')
    plt.plot(test_accs, label='Tes_Acc')
    plt.legend()
    plt.show()

    ## Report the best and final test accuracy
    print(max(test_accs))
    acc = accuracy(model, testdataloader, device)
    print("The accuracy {}".format(acc))
    ## Save the trained weights
    import os
    save_dir = r'E:\数据\花卉分类\space\pytorch'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    torch.save(model.state_dict(), os.path.join(save_dir, 'model.pth'))


if __name__ == '__main__':
    main()
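Once training has finished, the saved weights can be reloaded for a standalone evaluation. A minimal sketch, assuming main() above has already written model.pth to the save directory it uses and that the test set is rebuilt the same way:

def evaluate_saved(weights_path=r'E:\数据\花卉分类\space\pytorch\model.pth'):
    # Reload the weights saved by main() and report test-set accuracy
    model = resnet20(5)
    model.load_state_dict(torch.load(weights_path, map_location=device))
    model.to(device)
    test_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD),
    ])
    tsdataset = ImageFolder(r"E:\数据\垃圾分类\test", transform=test_transform)
    testdataloader = torch.utils.data.DataLoader(tsdataset, shuffle=False, batch_size=25)
    print("reloaded test accuracy: {}".format(accuracy(model, testdataloader, device)))

The ResNet variants imported above (resnet6 through resnet20) are defined in model.py, reproduced next.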
from torch import nn
import numpy as np
import torch


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_channels, out_channels, stride):
        super(BasicBlock, self).__init__()
        ## A basic block is conv1 + BN1 + ReLU + conv2 + BN2,
        ## plus a shortcut from the conv1 input to the BN2 output; note that the shortcut
        ## must match the feature-map size and channel count of the BN2 output
        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        if self.stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, stride=stride, kernel_size=1, bias=False),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, x):
        identity = x
        if self.stride != 1 or self.in_channels != self.out_channels:  # "dashed" residual: the shortcut needs a 1x1 conv to match shapes
            identity = self.shortcut(x)  # shortcut branch
        # Main residual branch
        out = self.conv1(x)
        # print(out.shape)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        # print(out.shape, identity.shape)
        out += identity
        out = self.relu(out)
        return out


class ResNet(nn.Module):

    # ResNet(3, block=BasicBlock, num_blocks=[[1, 64, 2], [1, 128, 2], [1, 256, 2], [2, 512, 2]],num_classes=num_classes)
    def __init__(self, infeat, block, num_blocks, num_classes=5):
        super(ResNet, self).__init__()
        ## ResNet stem: convolution + BN + ReLU + max pooling
        self.in_channels = 3
        self.include_top = True  # whether to include the final fully connected (dense) layer
        self.conv1 = nn.Conv2d(3, infeat, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(infeat)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # halves the feature-map size
        # input channel count of each stage: infeat followed by the stage widths (e.g. 64, 128, 256, 512)
        infeats = [num_blocks[i][1] for i in range(len(num_blocks))]
        infeats.insert(0, infeat)
        self.layers = nn.Sequential(*[
            self._make_layer(
                block,
                infeats[i],
                num_blocks[i][1],
                num_blocks[i][0],
                stride=num_blocks[i][2]) for i in range(len(num_blocks))
        ])

        self.avgpool1 = nn.AdaptiveAvgPool2d((1, 1))  # adaptive average pooling: output is (1, 1) regardless of input size
        self.fc = nn.Linear(num_blocks[-1][1], num_classes)  # the last stage's width (e.g. 512) feeds the classifier

    # block: the building-block type (BasicBlock here; a Bottleneck block would use the same interface)
    # in_channels: number of input channels of the stage
    # out_channels: output channels of the stage (for a Bottleneck it would be 4x this value)
    # num_blocks: number of blocks in the stage
    # stride: stride of the first block in the stage

    def _make_layer(self, block, in_channels, out_channels, num_blocks,
                    stride):
        # The first block of the stage uses the stride passed to _make_layer;
        # the remaining num_blocks - 1 blocks use stride 1
        strides = [stride] + [1] * (num_blocks - 1)

        layers = []
        for stride in strides:
            # block() builds one BasicBlock; the shortcut/downsample is handled inside the block
            layers.append(block(in_channels, out_channels, stride=stride))
            in_channels = out_channels
        return nn.Sequential(*layers)

    # The * on nn.Sequential(*layers) unpacks the list so all blocks are chained in order

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.maxpool(out)

        # print(out.shape)

        for layer in self.layers:
            out = layer(out)

        out = self.avgpool1(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out


def resnet10(num_classes):

    return ResNet(64, block=BasicBlock, num_blocks=[[1, 64, 1], [1, 128, 2], [1, 256, 2], [1, 512, 2]],
                  num_classes=num_classes)


def resnet12(num_classes):
    return ResNet(3, BasicBlock, [[1, 64, 1], [1, 128, 2], [2, 256, 2], [1, 512, 2]], num_classes=num_classes)


def resnet6(num_classes):
    return ResNet(3, BasicBlock, [[1, 64, 1], [1, 128, 2]], num_classes)


def resnet14(num_classes):
    return ResNet(3, BasicBlock, [[1, 64, 1], [2, 128, 2], [2, 256, 2], [1, 512, 2]], num_classes=num_classes)


def resnet20(num_classes):

    return ResNet(3, BasicBlock, [[2, 64, 1], [2, 128, 2], [3, 256, 2], [2, 512, 2]], num_classes=num_classes)

if __name__ == '__main__':
    net = resnet10(5)
    img = torch.randn(1, 3, 124, 124)
    output = net(img)
    print(output.shape)  # expected: torch.Size([1, 5])
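For a rough comparison of the variants defined above, a small sketch (not required by the training script) that reports each factory's trainable parameter count:

def count_parameters():
    # Print the number of trainable parameters for each ResNet variant defined above
    for name, factory in [("resnet6", resnet6), ("resnet10", resnet10), ("resnet12", resnet12),
                          ("resnet14", resnet14), ("resnet20", resnet20)]:
        net = factory(5)
        n_params = sum(p.numel() for p in net.parameters())
        print(name, n_params)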

 

Task 3: Model Testing + Online Image Upload and Recognition

 


from models import snet
import torch
import numpy as np
import cv2
from torchvision import transforms
import matplotlib.pyplot as plt

# ImageNet normalization statistics
MEAN_RGB = [0.485, 0.456, 0.406]
STD_RGB = [0.229, 0.224, 0.225]

# Class names (this inference demo uses the CIFAR-10 label set)
label_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# Run inference on the CPU
device = torch.device('cpu')


def load_image(image_path):
    # Read the test image with OpenCV (returns a BGR ndarray)
    img = cv2.imread(image_path)
    return img


# Preprocess the test image with OpenCV and NumPy
def preprocess(img, mean=MEAN_RGB, std=STD_RGB):
    assert isinstance(img, np.ndarray)
    # Resize to the model input size
    img_rs = cv2.resize(img, dsize=(32, 32), interpolation=cv2.INTER_AREA)
    # Convert the channel order from BGR (OpenCV default) to RGB
    img_rs = cv2.cvtColor(img_rs, cv2.COLOR_BGR2RGB)
    # Reorder to channel-first (C, H, W) as torch expects
    img_rs_arr_chw = img_rs.transpose(2, 0, 1)
    # Cast to float and scale to [0, 1]
    img_rs_tensor = torch.Tensor(img_rs_arr_chw).to(torch.float32) / 255.
    # Normalize with the ImageNet statistics
    img_norm_t = transforms.functional.normalize(img_rs_tensor, mean, std)
    # Add the batch dimension expected by the model
    img_norm_t_b = img_norm_t.reshape(1, 3, 32, 32)
    return img_norm_t_b

# Instantiate the model defined in models.py
model = snet
# Load the pretrained weights
model.load_state_dict(torch.load('model.pth', map_location=device))


## Inference function: classify the image at the given path
def infer(image_path, model=model, device=device, label_names=label_names):
    img = load_image(image_path)
    # Preprocess the image
    img_t = preprocess(img)
    # Move the model and the input to the target device
    model.to(device)
    img_t = img_t.to(device)
    # Forward pass
    model.eval()
    output = model(img_t)
    result = output.detach().cpu().numpy()
    label_index = np.argmax(result)
    label = label_names[label_index]
    print("Predicted class: {}".format(label))
    return label


## Run inference on five sample images
if __name__ == '__main__':
    infer('images/bird1.png')
    infer('images/car1.png')
    infer('images/deer1.png')
    infer('images/horse.png')
    infer('images/ship1.png')
# Build a web service with Flask for image upload and recognition
import os
from flask import Flask, render_template, request, flash, make_response, redirect, url_for
from werkzeug.utils import secure_filename
from PIL import Image
from flask import send_file
from inference import infer
from engine.util import send_link

UPLOAD_FOLDER = "/space/static/images"
app = Flask(__name__, template_folder="/project/static/templates")

# Allowed upload file types
upload_type = set(["jpg", 'png', "JPG", "PNG"])


# Check that the uploaded file has an allowed extension
def allow_type(filename):
    return "." in filename and filename.rsplit('.', 1)[-1] in upload_type


route_prefix = os.environ["JUPYTERHUB_SERVICE_PREFIX"] + "export/"
print("点击下方链接进入demo页面:")
send_link(route_prefix + "index")


# View function upload_file: render the upload template (upload.html) and,
# on POST, save the uploaded image under the personal space directory (/space).
@app.route(route_prefix + 'index', methods=['GET', 'POST'])
def upload_file():
    if request.method == "POST":
        file = request.files['file']
        if file and allow_type(file.filename):
            filename = secure_filename(file.filename)
            if filename != file.filename:
                flash("file")
                return render_template("upload.html")
            # Make sure the upload folder exists, save the file, then redirect to the result page
            os.makedirs(UPLOAD_FOLDER, exist_ok=True)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            return redirect(url_for('updata', fileName=filename))
        else:
            return "Upload Failed"
    else:
        return render_template("upload.html")


@app.route(route_prefix + 'space/uploads/<path:fileName>', methods=["POST", 'GET'])
def updata(fileName):
    result = render_photo_as_page(fileName)
    return render_template("show.html", fname=fileName, result=result)


# Call infer on the uploaded image and save the prediction
# (infer could be swapped for a scene_recognition function to classify the upload instead)
def render_photo_as_page(filename):
    img = Image.open(os.path.join(UPLOAD_FOLDER, filename))
    img.save(os.path.join(UPLOAD_FOLDER, filename))
    preds = infer(os.path.join(UPLOAD_FOLDER, filename))
    result = {}
    result['prediction'] = preds
    result["filename"] = filename
    return result


## Serve the uploaded image file back to the page
@app.route(route_prefix + 'space/get/<path:fileName>', methods=['GET'])
def get_file(fileName):
    file_path = os.path.join(UPLOAD_FOLDER, fileName)
    return send_file(file_path)


## Feedback view thanks: handle the prediction result submitted by the form in show.html and render thanks.html
@app.route(route_prefix + 'space/thanks', methods=['POST', 'GET'])
def thanks():
    category = request.form['Correctness']
    prediction = request.form['prediction']
    fileName = request.form['filename']
    return render_template("thanks.html", category=category, prediction=prediction, fileName=fileName)


if __name__ == '__main__':
    app.run(host="0.0.0.0")
    # After starting the app, open route_prefix + "index" as the URL in a browser to reach the service
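To exercise the upload endpoint without a browser, a hedged sketch using the requests library, run from a separate process; the host/port and sample file are placeholders, and the real URL is the one printed by send_link() above:

import os
import requests

# Placeholder base URL: substitute the link printed by send_link(route_prefix + "index")
url = "http://localhost:5000" + os.environ["JUPYTERHUB_SERVICE_PREFIX"] + "export/index"
with open("images/bird1.png", "rb") as f:  # any allowed .jpg/.png file
    resp = requests.post(url, files={"file": f})
print(resp.status_code)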

 

 
