Image Recognition with Django

The project relies on the following libraries (pickle and os are part of the Python standard library; the rest are third-party packages that must be installed separately):

opencv-python (cv2)

numpy

pickle

os

tensorflow

torch

torchvision

PIL (Pillow)

1. Training the Model

You need to prepare the training images yourself. The CIFAR-10 or CIFAR-100 dataset can be used, but the dataset must first be unpacked into image files before training. How do we unpack it? See the code below:

import cv2
import numpy as np
import pickle
import os


# Unpack a CIFAR-10 batch file and return the decoded dictionary
def unpickle(file):
    with open(file, 'rb') as fo:
        data_dict = pickle.load(fo, encoding='latin1')
    return data_dict

# path=os.getcwd()
# print(path)
def cifar10_to_images():
    tar_dir = '..//datasets//cifar-10-batches-py//'  # directory of the raw CIFAR-10 batches
    train_root_dir = '..//cifar10//train//'  # directory for the extracted training images
    test_root_dir = '..//cifar10//test//'
    if not os.path.exists(train_root_dir):
        os.makedirs(train_root_dir)
    if not os.path.exists(test_root_dir):
        os.makedirs(test_root_dir)
    # Generate the training images; to save PNGs instead, simply change the file extension.
    label_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
    for j in range(1, 6):
        dataName = tar_dir + "data_batch_" + str(j)
        Xtr = unpickle(dataName)
        print(dataName + " is loading...")

        for i in range(0, 10000):
            img = np.reshape(Xtr['data'][i], (3, 32, 32))  # Xtr['data'] holds the raw image bytes
            img = img.transpose(1, 2, 0)  # to height x width x channel (RGB) layout
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # OpenCV writes BGR, so convert to keep colors correct
            picName = train_root_dir + str(Xtr['labels'][i]) + '_' + label_names[Xtr['labels'][i]] + '_' + str(
                i + (j - 1) * 10000) + '.jpg'  # label_class_index
            cv2.imwrite(picName, img)
        print(dataName + " loaded.")

    print("test_batch is loading...")

    # Generate the test images
    testXtr = unpickle(tar_dir + "test_batch")
    for i in range(0, 10000):
        img = np.reshape(testXtr['data'][i], (3, 32, 32))
        img = img.transpose(1, 2, 0)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        picName = test_root_dir + str(testXtr['labels'][i]) + '_' + label_names[testXtr['labels'][i]] + '_' + str(
            i) + '.jpg'
        cv2.imwrite(picName, img)
    print("test_batch loaded.")

After unpacking the dataset you can start training the model. I trained on a GPU; to do the same you need a GPU-enabled TensorFlow installation together with CUDA and cuDNN. How to install these is not covered in detail in this article.
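
Before launching training it is worth confirming that TensorFlow actually sees the GPU; a quick check (no assumptions beyond a working TensorFlow install):

import tensorflow as tf

# Lists the GPUs visible to TensorFlow; an empty list means training will fall back to the CPU.
print(tf.config.list_physical_devices('GPU'))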

Core code:

alex_net.py

import tensorflow as tf


def AlexNet(num_classes):
    return tf.keras.models.Sequential([
        # Use a large 11x11 window to capture objects, with stride 4 to
        # shrink the output height and width. The number of output channels
        # is far larger than in LeNet.
        tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, input_shape=(224, 224, 3), activation='relu'),
        tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
        # Shrink the convolution window and use 'same' padding so the input and
        # output have the same height and width, while increasing the channel count.
        tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),
        tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
        # Three consecutive convolutional layers with a smaller window.
        # Except for the last one, the number of output channels keeps growing.
        # No pooling layers follow the first two of these convolutions.
        tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),
        tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),
        tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),
        tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
        tf.keras.layers.Flatten(),
        # The fully connected layers are several times larger than in LeNet;
        # dropout is used to mitigate overfitting.
        tf.keras.layers.Dense(4096, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(4096, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        # Output layer: the number of units equals num_classes rather than the
        # 1000 classes used in the original paper.
        tf.keras.layers.Dense(num_classes, activation='softmax')
    ])
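
A quick sanity check of the architecture can be done before training (a sketch; the class count of 3 is just an example value):

from alex_net import AlexNet

# Because input_shape is set on the first layer, the model is already built and can be summarized directly
model = AlexNet(3)
model.summary()  # prints layer output shapes and parameter counts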

alex_net_train_animals.py

from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2,
                                    height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True,)
#train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)  # note: validation data must not be augmented
test_datagen = ImageDataGenerator(rescale=1./255)  # note: test data must not be augmented
# train_generator = train_datagen.flow_from_directory(r'..\datasets\cats_and_dogs_and_horse_small\train', # target directory
train_generator = train_datagen.flow_from_directory(r'E:\animals\train', # target directory
                                                    target_size=(224, 224), # resize all images to 224x224
                                                    # batch_size=20,
                                                    batch_size=100,
                                                    class_mode='categorical') # categorical labels to match the categorical_crossentropy loss
validation_generator = validation_datagen.flow_from_directory(
    # r'..\datasets\cats_and_dogs_and_horse_small\validation',
    r'E:\animals\validation',
    target_size=(224, 224),
    # batch_size=20,
    batch_size=100,
    class_mode='categorical')
test_generator = test_datagen.flow_from_directory(
    # r'..\datasets\cats_and_dogs_and_horse_small\test',
    r'E:\animals\test',
    target_size=(224, 224),
    # batch_size=20,
    batch_size=100,
    class_mode='categorical')
for data_batch, labels_batch in train_generator:
    print('batch data shape:', data_batch.shape)
    print('batch label shape:', labels_batch.shape)
    break
import tensorflow as tf
from keras.callbacks import ModelCheckpoint
filepath = 'model1.h5'
# Save a new checkpoint only when the monitored metric improves.
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                             save_best_only=True, mode='max', period=1)
callbacks_list = [checkpoint]
from alex_net import AlexNet
model = AlexNet(100)
# model = AlexNet(3)
model.compile(loss='categorical_crossentropy',
         optimizer= tf.keras.optimizers.Adam(learning_rate=0.0001),
         metrics=['acc'])
history = model.fit(
     train_generator,
     steps_per_epoch=500,
     # steps_per_epoch=300,
     epochs=100,
     validation_data=validation_generator,
     # validation_steps=150,
     validation_steps=100,
     callbacks=callbacks_list)
# Load the best saved weights before evaluating
model.load_weights(filepath)
scores = model.evaluate(test_generator)
print("{0}: {1:.2f}%".format(model.metrics_names[1], scores[1] * 100))
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
from com.util import plotting
plotting(acc,loss,val_acc,val_loss,mark_every=2)
# Generator output: with batch_size=20, each batch contains 20 samples of 224x224 RGB images.
# In that configuration, reading all 6000 training samples takes 300 batches, and the 3000 validation and test samples take 150 batches each.
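
The plotting helper imported from com.util is project-specific and is not shown in the original post; a minimal matplotlib sketch with the same call signature could look like this (an assumption, not the author's actual implementation):

import matplotlib.pyplot as plt

def plotting(acc, loss, val_acc, val_loss, mark_every=2):
    # Plot training/validation accuracy and loss curves over the epochs
    epochs = range(1, len(acc) + 1)
    plt.figure(figsize=(10, 4))
    plt.subplot(1, 2, 1)
    plt.plot(epochs, acc, 'bo-', markevery=mark_every, label='train acc')
    plt.plot(epochs, val_acc, 'r^-', markevery=mark_every, label='val acc')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(epochs, loss, 'bo-', markevery=mark_every, label='train loss')
    plt.plot(epochs, val_loss, 'r^-', markevery=mark_every, label='val loss')
    plt.legend()
    plt.show()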

2. Using the Model for Prediction

Core code:

LeNet model

'''1. Build the network'''
import torch.nn as nn
import torch.nn.functional as F


class LeNet(nn.Module):
    def __init__(self):
        super(LeNet,self).__init__()
        # Conv layer 1: input depth=3, output depth=16, kernel size=5x5, stride=1;
        # 16 is the output depth and also the number of convolution kernels
        self.conv1 = nn.Conv2d(in_channels=3,out_channels=16,kernel_size=5,stride=1)
        # Pooling layer 1: max pooling, window size=2x2, stride=2
        self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2)
        # Conv layer 2
        self.conv2 = nn.Conv2d(in_channels=16,out_channels=32,kernel_size=5,stride=1)
        # Pooling layer 2
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Fully connected layer 1: input size=32*5*5, output size=120
        self.fc1 = nn.Linear(32*5*5,120)
        # Fully connected layer 2
        self.fc2 = nn.Linear(120,84)
        # Fully connected layer 3
        self.fc3 = nn.Linear(84,10)

    def forward(self,x):
        x = F.relu(self.conv1(x))  # input(3, 32, 32) output(16, 28, 28)
        x = self.pool1(x)  # output(16, 14, 14)
        x = F.relu(self.conv2(x))  # output(32, 10, 10)
        x = self.pool2(x)  # output(32, 5, 5)
        x = x.view(-1, 32 * 5 * 5)  # output(32*5*5)
        x = F.relu(self.fc1(x))  # output(120)
        x = F.relu(self.fc2(x))  # output(84)
        x = self.fc3(x)  # output(10)
        return x
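
The script below loads a weight file Lenet.pth, but the original post does not show how it was produced. A minimal CIFAR-10 training sketch that would create such a file, reusing the LeNet class defined above (an illustration only, not the author's training code):

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=36, shuffle=True)

net = LeNet()
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)

for epoch in range(5):  # a handful of epochs is enough for a demo
    for images, labels in train_loader:
        optimizer.zero_grad()
        loss = loss_function(net(images), labels)
        loss.backward()
        optimizer.step()

torch.save(net.state_dict(), 'Lenet.pth')  # weights later loaded by the prediction script

The prediction script itself: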
import torch
import torchvision.transforms as transforms
from PIL import Image
from zzz.main import LeNet

transform = transforms.Compose([transforms.Resize((32, 32)),transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

classes = ('plane', 'car', 'bird', 'cat','deer', 'dog', 'frog', 'horse', 'ship', 'truck')

net = LeNet()
net.load_state_dict(torch.load('Lenet.pth'))

im = Image.open('D:/ship.jpg')
im = transform(im)  # [C, H, W]
# The network expects input of shape [batch, channel, height, width], so add a batch dimension
im = torch.unsqueeze(im, dim=0)  # [N, C, H, W]

with torch.no_grad():
    outputs = net(im)
    predict = torch.max(outputs, dim=1)[1].data.numpy()  # the index corresponds to a class name in classes
print(classes[int(predict)])

# Print the raw prediction tensor instead
with torch.no_grad():
    outputs = net(im)
    predict = torch.softmax(outputs, dim=1)  # softmax over the class dimension; result shape is [batch, num_classes]
print(predict)
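
To report the top prediction together with its probability, a short follow-up reusing the variables above could be:

prob, idx = torch.max(predict, dim=1)  # highest softmax probability and its class index
print('%s: %.2f%%' % (classes[idx.item()], prob.item() * 100))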

3. Image Recognition via Django

Frontend

1. First, include the Bootstrap frontend framework; Bootstrap can be downloaded from its official website.

2. Load Django's static resource tag with {% load static %}, and prefix every static resource path with the static tag, e.g. href="{% static 'css/bootstrap.css' %}". This way the static configuration files are referenced correctly.

{% load static %}
<html lang="zh-cn">

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>Image Recognition{% block title %}{% endblock %}</title>
    <link href="{% static 'css/bootstrap.css' %}" rel="stylesheet">
    <link href="{% static 'css/style.css' %}" rel="stylesheet">
    <script src="{% static 'js/jquery.min.js' %}"></script>
    <script src="{% static 'js/bootstrap.min.js' %}"></script>
</head>
<body>
<!-- Main content -->
    <div class="model-details">
        <img class="img-responsive" style="max-width:700px;" src="{% static 'img/dog.jpg' %}">
        <!-- Button that triggers the modal -->
        <h3>Use the Product</h3>
        <button class="btn btn-primary btn-lg" data-toggle="modal" data-target="#myModal">
            Online Image Recognition
        </button>
        <!-- Modal dialog -->
        <div class="modal fade" id="myModal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel"
            aria-hidden="true">
            <div class="modal-dialog">
                <div class="modal-content">
                    <div class="modal-header">
                        <button type="button" class="close" data-dismiss="modal" aria-hidden="true">
                            &times;
                        </button>
                        <h4 class="modal-title" id="myModalLabel">
                            Online Image Recognition
                        </h4>
                    </div>
                    <div class="modal-body">
                        <img id="photoIn" src="{% static 'img/sample.jpg' %}" class="img-responsive"
                            style="max-width:250px">
                        <h4>Please upload an image</h4>
                        <input type="file" id="photo" name="photo" />
                    </div>
                    <div class="modal-footer">
                        <button type="button" class="btn btn-default" data-dismiss="modal">Close
                        </button>
                        <button type="button" id="compute" class="btn btn-primary">
                            Start Detection
                        </button>
                        <textarea id="output" disabled class="form-control" rows="5"
                            style="text-align: left;font: 40px 宋体;border: 0">
                        </textarea>
                    </div>
                </div><!-- /.modal-content -->
            </div><!-- /.modal -->
        </div>
        <script>
            $(function () {
                $('#photo').on('change', function () {
                    var r = new FileReader();
                    f = document.getElementById('photo').files[0];
                    r.readAsDataURL(f);
                    r.onload = function (e) {
                        document.getElementById('photoIn').src = this.result;
                    };
                });
            });
        </script>   
    </div>
<!-- Send the image to the backend server for recognition -->
<script>
    $('#compute').click(function () {
        formdata = new FormData();
        var file = $("#photo")[0].files[0];
        formdata.append("image", file);
        $.ajax({
            url: '/productapp/imgdetect/', // the Django view that runs the prediction
            type: 'POST', // request type
            data: formdata,
            dataType: 'json', // expect a JSON response
            processData: false,
            contentType: false,
            success: ShowResult // callback that displays the result when the request succeeds
        })
    })
</script>

<!-- Display the returned result -->
<script>
    function ShowResult(data) {
        output.value = data['output'];
    }
</script>
<script>
    document.getElementById("output").style.width="150px";
    document.getElementById("output").style.height="70px";
</script>
</body>
</html>

Backend

from django.shortcuts import render


# Create your views here.


def system(request):
	return render(request, 'imgsystem.html')


import numpy as np  # array operations
import urllib  # URL parsing
import json  # JSON handling
import cv2  # OpenCV
import os  # operating system utilities
from django.views.decorators.csrf import csrf_exempt  # CSRF exemption
from django.http import JsonResponse  # JSON responses
from django.http import HttpResponse
from PIL import Image
import keras.models
import tensorflow as tf
from keras.preprocessing import image
from django.urls import path
from django.views.static import serve
from django.conf import settings


def read_image(stream=None):
	if stream is None:
		return None
	data_temp = stream.read()
	img = np.asarray(bytearray(data_temp), dtype="uint8")
	img = cv2.imdecode(img, cv2.IMREAD_COLOR)
	return img


@csrf_exempt  # exempt this view from CSRF checks so the AJAX POST is accepted
def imgdetect(request):
	result = {"code": None}
	if request.method == "POST":  # the client must upload the image via POST
		filelist = request.FILES.get("image", None)  # fetch the uploaded image
		if not filelist:
			return HttpResponse('file not found')
		img = read_image(stream=filelist)

		# Save the uploaded file locally; the destination directory is up to you
		save_path = 'E:\\test\\demo1\\product1\\add_img\\%s' % (filelist.name)
		with open(save_path, 'wb+') as f:
			for chunk in filelist.chunks():
				f.write(chunk)
		model = keras.models.load_model("E:/test/demo1/productapp/model.h5")
		classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
		# img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
		# img = image.load_img('product1/add_img/cat.1.jpg', target_size=(224, 224,))
		img = image.load_img(save_path, target_size=(224, 224,))
		img_data = image.img_to_array(img)
		img_data = np.expand_dims(img_data, axis=0)
		test_img = img_data / 255  # rescale pixel values from 0-255 to 0-1, matching the training preprocessing
		# test_img = np.expand_dims(test_img, 0)  # expand the 3-D image into a 4-D tensor
		pred = model.predict(test_img)  # run the prediction
		# print('prediction:', end='')
		code = classes[pred.argmax()]
		result.update({"output": code})
	return JsonResponse(result)
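
The AJAX call posts to /productapp/imgdetect/, so the URL configuration must route that path to the view above. A minimal sketch of the app-level urls.py, with names assumed to match the project shown here, and assuming the project urls.py includes it under the 'productapp/' prefix:

from django.urls import path
from . import views

urlpatterns = [
    path('', views.system, name='system'),  # renders imgsystem.html
    path('imgdetect/', views.imgdetect, name='imgdetect'),  # prediction endpoint used by the AJAX call
]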

Other things to note

1. A few configuration lines need to be added to the project's settings.py file.
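
A minimal sketch of what these settings typically look like, assuming the static/ and product1/add_img/ directories used earlier in this article (not necessarily the author's exact lines):

import os

# Directory that holds the css/, js/ and img/ files referenced by the {% static %} tags
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]

# Directory where the imgdetect view writes the uploaded images
MEDIA_ROOT = os.path.join(BASE_DIR, 'product1', 'add_img')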

2. A product1 folder needs to be created in the project root; the directory structure is as follows.
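
A rough reconstruction of the layout, inferred from the paths used in the code above (an assumption, not the author's exact tree):

demo1/
├── demo1/              # project settings and urls
├── productapp/         # the app containing views.py and model.h5
├── product1/
│   └── add_img/        # uploaded images are saved here
├── static/
│   ├── css/
│   ├── js/
│   └── img/
└── templates/
    └── imgsystem.html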
