# manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at this project's settings and dispatch the CLI command."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'car.settings')
    try:
        from django.core import management
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    management.execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
# web/index.py
from django.http import JsonResponse
from django.shortcuts import render
import os
import cv2
import imghdr
import time
import numpy as np
from tensorflow import keras
from django.views.decorators.csrf import csrf_exempt
from util.CNN import cnn_predict
from util.Unet import unet_predict
from util.core import locate_and_correct
# Project root: two directory levels above this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
"""
django-admin startproject car
manage.py runserver 0.0.0.0:8000
"""
MEDIA_ROOT = os.path.join(BASE_DIR, 'h5')  # directory holding the trained Keras models
# Both models are loaded once at import time so every request reuses them.
location = keras.models.load_model(os.path.join(MEDIA_ROOT, 'location.h5'))  # U-Net model for plate localisation
cnn = keras.models.load_model(os.path.join(MEDIA_ROOT, 'cnn.h5'))  # CNN model for plate character recognition
def main(request):
    """Render the landing page of the plate-recognition UI."""
    return render(request, 'index.html', {'content': "车牌识别"})
@csrf_exempt
def upload(request):
    """Accept an uploaded plate image, persist it, and return the recognition result.

    Expects a multipart POST with the file under the "licensePlate" key.
    Returns JSON: {'status': True, 'license_plate': ...} on success, or
    {'status': False, 'msg': ...} on any failure (the broad except turns
    unexpected errors into an error response instead of a 500).
    """
    try:
        my_file = request.FILES.get("licensePlate", None)
        if my_file:
            path = os.path.join(BASE_DIR, 'images')
            os.makedirs(path, exist_ok=True)  # first upload may run before the dir exists
            # BUG FIX: split(".")[1] raised IndexError for names without a dot
            # and returned the wrong piece for names with several dots;
            # rsplit keeps only the final extension.
            suffix = my_file.name.rsplit(".", 1)[-1]
            # Microsecond timestamp as a collision-resistant file name.
            file_name = str(round(time.time() * 1000000)) + '.' + suffix
            image_path = os.path.join(path, file_name)
            # "with" guarantees the handle is closed even if a chunk write fails.
            with open(image_path, 'wb+') as destination:
                for chunk in my_file.chunks():
                    destination.write(chunk)
            imgType_list = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif'}
            # Validate by content sniffing, not by the (user-supplied) extension.
            if imghdr.what(image_path) in imgType_list:
                license_plate = correct(image_path)
                return JsonResponse({'status': True, 'license_plate': license_plate},
                                    json_dumps_params={'ensure_ascii': False})
            else:
                return JsonResponse({'status': False, 'msg': u'错误:文件类型不对额'},
                                    json_dumps_params={'ensure_ascii': False})
        else:
            return JsonResponse({'status': False, 'msg': u'错误:文件不存在'}, json_dumps_params={'ensure_ascii': False})
    except Exception as e:
        return JsonResponse({'status': False, 'msg': u'错误:{}'.format(e)}, json_dumps_params={'ensure_ascii': False})
def correct(image_path):
    """Locate, rectify, and recognise a licence plate in the image at image_path.

    Returns the recognised plate string, or "" when nothing was recognised.
    """
    # np.fromfile + imdecode instead of cv2.imread so paths containing
    # non-ASCII (Chinese) characters can be read.
    img_src = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), -1)
    h, w = img_src.shape[0], img_src.shape[1]
    if h * w <= 240 * 80 and 2 <= w / h <= 5:
        # Small image with a plate-like aspect ratio: assume the whole picture
        # is the plate and resize it directly to the CNN input size (240x80).
        # [:, :, :3] drops a possible alpha channel.
        lic = cv2.resize(img_src, dsize=(240, 80), interpolation=cv2.INTER_AREA)[:, :, :3]
        img_src_copy, Lic_img = img_src, [lic]
    else:
        # Otherwise run the U-Net on the original image to get a plate mask,
        # then locate and rectify the plate(s) from that mask.
        img_src, img_mask = unet_predict(location, image_path)
        img_src_copy, Lic_img = locate_and_correct(img_src, img_mask)
    Lic_predict = cnn_predict(cnn, Lic_img)  # list of (plate image, recognised text) tuples
    license_plate = ""
    if Lic_predict:
        # BUG FIX: the original if i==0/elif i==1/elif i==2 chain just
        # overwrote license_plate on each of the first three candidates, so
        # only the last of them survived. Expressed directly:
        license_plate = Lic_predict[:3][-1][1]
        print("车牌号:" + license_plate)
    else:  # empty Lic_predict means recognition failed
        print("未能识别")
    return license_plate
if __name__ == '__main__':
    # Manual smoke test: run recognition on a local file (Windows-style path).
    img_path = "c:\\01.jpg"
    correct(img_path)
# util/CNN.py
# -*- coding:utf-8 -*-
from tensorflow.keras import layers, losses, models
import numpy as np
import cv2
import os
# 训练模型
def cnn_train():
    """Train the 7-head character-recognition CNN on 240x80 plate images.

    Each image's file name is expected to begin with the 7 plate characters,
    which serve as the label. Saves the trained model to cnn.h5.
    """
    # Plate character -> class index (31 province abbreviations, digits,
    # letters; 65 classes in total — note I and O are excluded).
    char_dict = {"京": 0, "沪": 1, "津": 2, "渝": 3, "冀": 4, "晋": 5, "蒙": 6, "辽": 7, "吉": 8, "黑": 9, "苏": 10,
                 "浙": 11, "皖": 12, "闽": 13, "赣": 14, "鲁": 15, "豫": 16, "鄂": 17, "湘": 18, "粤": 19, "桂": 20,
                 "琼": 21, "川": 22, "贵": 23, "云": 24, "藏": 25, "陕": 26, "甘": 27, "青": 28, "宁": 29, "新": 30,
                 "0": 31, "1": 32, "2": 33, "3": 34, "4": 35, "5": 36, "6": 37, "7": 38, "8": 39, "9": 40,
                 "A": 41, "B": 42, "C": 43, "D": 44, "E": 45, "F": 46, "G": 47, "H": 48, "J": 49, "K": 50,
                 "L": 51, "M": 52, "N": 53, "P": 54, "Q": 55, "R": 56, "S": 57, "T": 58, "U": 59, "V": 60,
                 "W": 61, "X": 62, "Y": 63, "Z": 64}
    # Load the dataset (plate images are 240 wide, 80 high).
    path = 'home/cnn_datasets/'
    pic_name = sorted(os.listdir(path))
    x_train, y_train = [], []
    for i, name in enumerate(pic_name):
        print("正在读取第%d张图片" % i)
        # imdecode via np.fromfile handles non-ASCII paths that cv2.imread cannot.
        img = cv2.imdecode(np.fromfile(os.path.join(path, name), dtype=np.uint8), -1)
        label = [char_dict[ch] for ch in name[0:7]]  # first 7 chars of the file name are the label
        x_train.append(img)
        y_train.append(label)
    x_train = np.array(x_train)
    # y_train becomes a list of 7 arrays of shape (n,): per character position,
    # the class index of that character across all n images.
    y_train = [np.array(y_train)[:, i] for i in range(7)]
    # CNN model (renamed from `input` to avoid shadowing the builtin).
    inputs = layers.Input((80, 240, 3))  # plate image shape (80, 240, 3)
    x = inputs
    x = layers.Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='same', activation='relu')(x)
    x = layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=2)(x)
    for i in range(3):
        x = layers.Conv2D(filters=32 * 2 ** i, kernel_size=(3, 3), padding='valid', activation='relu')(x)
        x = layers.Conv2D(filters=32 * 2 ** i, kernel_size=(3, 3), padding='valid', activation='relu')(x)
        x = layers.MaxPool2D(pool_size=(2, 2), padding='same', strides=2)(x)
        x = layers.Dropout(0.5)(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.3)(x)
    # 7 softmax heads, one per plate character, 65 class probabilities each.
    output = [layers.Dense(65, activation='softmax', name='c%d' % (i + 1))(x) for i in range(7)]
    model = models.Model(inputs=inputs, outputs=output)
    model.summary()
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',  # labels are integer-encoded, not one-hot
                  metrics=['accuracy'])
    # Train the model.
    print("开始训练cnn")
    model.fit(x_train, y_train, epochs=35)  # total loss is the sum of the 7 head losses
    model.save('cnn.h5')
    print('cnn.h5保存成功!!!')
# 车牌识别
def cnn_predict(cnn, Lic_img):
    """Run the CNN over each candidate plate image and decode confident results.

    :param cnn: model whose predict() yields 7 per-character distributions over 65 classes
    :param Lic_img: iterable of (80, 240, 3) plate images
    :return: list of (plate image, decoded string) tuples; low-confidence
        candidates are dropped
    """
    characters = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫",
                  "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2",
                  "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M",
                  "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
    results = []
    for plate in Lic_img:
        # Model expects a batch dimension: (1, 80, 240, 3).
        raw = cnn.predict(plate.reshape(1, 80, 240, 3))
        probs = np.array(raw).reshape(7, 65)  # one 65-way distribution per character slot
        # Accept only confident predictions: at least 4 of the 7 slots must
        # contain a probability of 0.8 or higher.
        if np.count_nonzero(probs >= 0.8) >= 4:
            decoded = ''.join(characters[idx] for idx in np.argmax(probs, axis=1))
            # Insert the conventional separator dot after the province+letter prefix.
            results.append((plate, decoded[:2] + '·' + decoded[2:]))
    return results
# util/core.py
# -*- coding:utf-8 -*-
import cv2
import numpy as np
def locate_and_correct(img_src, img_mask):
    """
    Locate and rectify the licence plate(s) in img_src using the U-Net mask.

    cv2.findContours on img_mask yields the plate region's contour and its
    minimum-area bounding rectangle. The contour points closest to the
    rectangle's four corners (and to its top/bottom edges) are taken as the
    four corners of the (possibly skewed) plate, which is then perspective-
    warped to an axis-aligned 240x80 rectangle.

    :param img_src: original image
    :param img_mask: binarised segmentation output of the U-Net; the plate
        region is white, the background black
    :return: (copy of img_src with the located plates outlined in green,
              list of rectified 240x80 plate images); ([], []) when no
              plate is detected
    """
    # cv2.imshow('img_mask',img_mask)
    # cv2.waitKey(0)
    # ret,thresh = cv2.threshold(img_mask[:,:,0],0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)  # binarise
    # cv2.imshow('thresh',thresh)
    # cv2.waitKey(0)
    try:
        contours, hierarchy = cv2.findContours(img_mask[:, :, 0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    except:  # OpenCV 3.x returns (image, contours, hierarchy) — guard against the version difference
        ret, contours, hierarchy = cv2.findContours(img_mask[:, :, 0], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not len(contours):  # no contours at all means no plate was detected
        # print("no plate detected")
        return [], []
    else:
        Lic_img = []
        img_src_copy = img_src.copy()  # the located plate outlines are drawn on this copy
        for ii, cont in enumerate(contours):
            x, y, w, h = cv2.boundingRect(cont)  # axis-aligned bounding rectangle of this contour
            img_cut_mask = img_mask[y:y + h, x:x + w]  # crop the mask to the candidate region
            # cv2.imshow('img_cut_mask',img_cut_mask)
            # cv2.waitKey(0)
            # print('w,h,mean,aspect ratio', w, h, np.mean(img_cut_mask), w/h)
            # Besides the plate region, contours may contain tiny specks only
            # 1-2 px wide or high. A genuine plate candidate has a bright mask
            # (high mean) and is not tiny, hence the filter below.
            if np.mean(img_cut_mask) >= 75 and w > 15 and h > 15:
                rect = cv2.minAreaRect(cont)  # min-area rotated rectangle: centre, size, rotation angle
                box = cv2.boxPoints(rect).astype(np.int32)  # its four corner points
                # cv2.drawContours(img_mask, contours, -1, (0, 0, 255), 2)
                # cv2.drawContours(img_mask, [box], 0, (0, 255, 0), 2)
                # cv2.imshow('img_mask',img_mask)
                # cv2.waitKey(0)
                cont = cont.reshape(-1, 2).tolist()
                # The two point sets of the perspective transform must correspond
                # one-to-one, so sort the rectangle corners into the fixed order
                # [top-left, bottom-left, top-right, bottom-right].
                box = sorted(box, key=lambda xy: xy[0])  # sort by x: left pair first, right pair last
                box_left, box_right = box[:2], box[2:]  # first 2 = left side, last 2 = right side
                box_left = sorted(box_left, key=lambda x: x[1])  # sort by y: [top-left, bottom-left]
                box_right = sorted(box_right, key=lambda x: x[1])  # [top-right, bottom-right]
                box = np.array(box_left + box_right)  # [TL, BL, TR, BR]
                # print(box)
                x0, y0 = box[0][0], box[0][1]  # the rectangle's 4 corners; next find the quadrilateral's corners
                x1, y1 = box[1][0], box[1][1]
                x2, y2 = box[2][0], box[2][1]
                x3, y3 = box[3][0], box[3][1]

                def point_to_line_distance(X, Y):
                    # Distance from (X, Y) to the top edge (through TL-TR) and to
                    # the bottom edge (through BL-BR) of the rectangle.
                    if x2 - x0:
                        k_up = (y2 - y0) / (x2 - x0)  # finite slope
                        d_up = abs(k_up * X - Y + y2 - k_up * x2) / (k_up ** 2 + 1) ** 0.5
                    else:  # vertical edge (infinite slope)
                        d_up = abs(X - x2)
                    if x1 - x3:
                        k_down = (y1 - y3) / (x1 - x3)  # finite slope
                        d_down = abs(k_down * X - Y + y1 - k_down * x1) / (k_down ** 2 + 1) ** 0.5
                    else:  # vertical edge (infinite slope)
                        d_down = abs(X - x1)
                    return d_up, d_down

                d0, d1, d2, d3 = np.inf, np.inf, np.inf, np.inf
                l0, l1, l2, l3 = (x0, y0), (x1, y1), (x2, y2), (x3, y3)
                # For every contour point, combine its squared distance to each
                # rectangle corner with its distance to the nearest top/bottom
                # edge (weighted mix) and keep the minimiser per corner: these
                # become the four corners of the skewed plate.
                for each in cont:
                    x, y = each[0], each[1]
                    dis0 = (x - x0) ** 2 + (y - y0) ** 2
                    dis1 = (x - x1) ** 2 + (y - y1) ** 2
                    dis2 = (x - x2) ** 2 + (y - y2) ** 2
                    dis3 = (x - x3) ** 2 + (y - y3) ** 2
                    d_up, d_down = point_to_line_distance(x, y)
                    weight = 0.975  # mixing weight between edge distance and squared corner distance
                    if weight * d_up + (1 - weight) * dis0 < d0:  # update on improvement
                        d0 = weight * d_up + (1 - weight) * dis0
                        l0 = (x, y)
                    if weight * d_down + (1 - weight) * dis1 < d1:
                        d1 = weight * d_down + (1 - weight) * dis1
                        l1 = (x, y)
                    if weight * d_up + (1 - weight) * dis2 < d2:
                        d2 = weight * d_up + (1 - weight) * dis2
                        l2 = (x, y)
                    if weight * d_down + (1 - weight) * dis3 < d3:
                        d3 = weight * d_down + (1 - weight) * dis3
                        l3 = (x, y)
                # print([l0,l1,l2,l3])
                # for l in [l0, l1, l2, l3]:
                #     cv2.circle(img=img_mask, color=(0, 255, 255), center=tuple(l), thickness=2, radius=2)
                # cv2.imshow('img_mask',img_mask)
                # cv2.waitKey(0)
                p0 = np.float32([l0, l1, l2, l3])  # TL, BL, TR, BR of the skewed plate; order matches p1
                p1 = np.float32([(0, 0), (0, 80), (240, 0), (240, 80)])  # the target axis-aligned rectangle
                transform_mat = cv2.getPerspectiveTransform(p0, p1)  # perspective transform matrix
                lic = cv2.warpPerspective(img_src, transform_mat, (240, 80))  # rectified plate image
                # cv2.imshow('lic',lic)
                # cv2.waitKey(0)
                Lic_img.append(lic)
                cv2.drawContours(img_src_copy, [np.array([l0, l1, l3, l2])], -1, (0, 255, 0), 2)  # outline the located plate in green on the copy
        return img_src_copy, Lic_img
# util/Unet.py
# -*- coding:utf-8 -*-
import numpy as np
import os
import cv2
from tensorflow.keras import layers, losses, models
def unet_train():
    """Train the U-Net used for licence-plate segmentation.

    Reads paired files 0.png, 1.png, ... from train_image/ and train_label/
    under `path` and saves the trained model to location.h5.
    """
    height = 512
    width = 512
    path = 'D:/desktop/unet_datasets/'
    input_name = os.listdir(path + 'train_image')
    n = len(input_name)
    print(n)
    X_train, y_train = [], []
    for i in range(n):
        print("正在读取第%d张图片" % i)
        img = cv2.imread(path + 'train_image/%d.png' % i)
        label = cv2.imread(path + 'train_label/%d.png' % i)
        X_train.append(img)
        y_train.append(label)
    X_train = np.array(X_train)
    y_train = np.array(y_train)

    def Conv2d_BN(x, nb_filter, kernel_size, strides=(1, 1), padding='same'):
        # Conv -> BatchNorm -> LeakyReLU building block.
        x = layers.Conv2D(nb_filter, kernel_size, strides=strides, padding=padding)(x)
        x = layers.BatchNormalization(axis=3)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)
        return x

    def Conv2dT_BN(x, filters, kernel_size, strides=(2, 2), padding='same'):
        # Upsampling block: transposed conv -> BatchNorm -> LeakyReLU.
        x = layers.Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding)(x)
        x = layers.BatchNormalization(axis=3)(x)
        x = layers.LeakyReLU(alpha=0.1)(x)
        return x

    # Encoder: four downsampling stages (8 -> 16 -> 32 -> 64 filters).
    inpt = layers.Input(shape=(height, width, 3))
    conv1 = Conv2d_BN(inpt, 8, (3, 3))
    conv1 = Conv2d_BN(conv1, 8, (3, 3))
    pool1 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv1)
    conv2 = Conv2d_BN(pool1, 16, (3, 3))
    conv2 = Conv2d_BN(conv2, 16, (3, 3))
    pool2 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv2)
    conv3 = Conv2d_BN(pool2, 32, (3, 3))
    conv3 = Conv2d_BN(conv3, 32, (3, 3))
    pool3 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv3)
    conv4 = Conv2d_BN(pool3, 64, (3, 3))
    conv4 = Conv2d_BN(conv4, 64, (3, 3))
    pool4 = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv4)
    # Bottleneck with dropout.
    conv5 = Conv2d_BN(pool4, 128, (3, 3))
    conv5 = layers.Dropout(0.5)(conv5)
    conv5 = Conv2d_BN(conv5, 128, (3, 3))
    conv5 = layers.Dropout(0.5)(conv5)
    # Decoder: four upsampling stages with skip connections to the encoder.
    convt1 = Conv2dT_BN(conv5, 64, (3, 3))
    concat1 = layers.concatenate([conv4, convt1], axis=3)
    concat1 = layers.Dropout(0.5)(concat1)
    conv6 = Conv2d_BN(concat1, 64, (3, 3))
    conv6 = Conv2d_BN(conv6, 64, (3, 3))
    convt2 = Conv2dT_BN(conv6, 32, (3, 3))
    concat2 = layers.concatenate([conv3, convt2], axis=3)
    concat2 = layers.Dropout(0.5)(concat2)
    conv7 = Conv2d_BN(concat2, 32, (3, 3))
    conv7 = Conv2d_BN(conv7, 32, (3, 3))
    convt3 = Conv2dT_BN(conv7, 16, (3, 3))
    concat3 = layers.concatenate([conv2, convt3], axis=3)
    concat3 = layers.Dropout(0.5)(concat3)
    conv8 = Conv2d_BN(concat3, 16, (3, 3))
    conv8 = Conv2d_BN(conv8, 16, (3, 3))
    convt4 = Conv2dT_BN(conv8, 8, (3, 3))
    concat4 = layers.concatenate([conv1, convt4], axis=3)
    concat4 = layers.Dropout(0.5)(concat4)
    conv9 = Conv2d_BN(concat4, 8, (3, 3))
    conv9 = Conv2d_BN(conv9, 8, (3, 3))
    conv9 = layers.Dropout(0.5)(conv9)
    outpt = layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(conv9)
    model = models.Model(inpt, outpt)
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    model.summary()
    print("开始训练u-net")
    # Tune epochs/batch_size to your hardware; too large a batch exhausts GPU
    # memory (the original author fit batch_size 15-20 in ~11 GB and reached
    # final loss ~250, accuracy ~95%).
    model.fit(X_train, y_train, epochs=100, batch_size=15)
    model.save('location.h5')
    # BUG FIX: the success message used to say 'unet.h5' although the file
    # saved above is location.h5.
    print('location.h5保存成功!!!')
def unet_predict(unet, img_src_path):
    """Run the U-Net on the image at img_src_path and return (source, mask).

    :param unet: trained segmentation model expecting (1, 512, 512, 3) input
    :param img_src_path: path to the image file (may contain non-ASCII chars)
    :return: (img_src, img_mask) — img_src is the (512, 512, 3) image fed to
        the network; img_mask is a uint8 (512, 512, 3) mask with all three
        channels identical, rescaled so its maximum value is 255.
    """
    # imdecode via np.fromfile so paths containing non-ASCII characters work.
    img_src = cv2.imdecode(np.fromfile(img_src_path, dtype=np.uint8), -1)
    # img_src=cv2.imread(img_src_path)
    if img_src.shape != (512, 512, 3):
        img_src = cv2.resize(img_src, dsize=(512, 512), interpolation=cv2.INTER_AREA)[:, :, :3]  # dsize=(width, height); [:, :, :3] drops a possible alpha channel so the reshape below works
    img_src = img_src.reshape(1, 512, 512, 3)  # the network expects a batch dimension: (1, 512, 512, 3)
    # NOTE(review): contrary to the original comment, NO /255 normalisation is
    # applied here — raw pixel values are fed to the network. Confirm the
    # model was trained on unnormalised input.
    img_mask = unet.predict(img_src)
    img_src = img_src.reshape(512, 512, 3)  # drop the batch dimension again
    img_mask = img_mask.reshape(512, 512, 3)  # prediction back to 3-D
    img_mask = img_mask / np.max(img_mask) * 255  # rescale so the brightest prediction maps to 255 (NOTE(review): divides by zero if the mask is all zeros)
    img_mask[:, :, 2] = img_mask[:, :, 1] = img_mask[:, :, 0]  # copy channel 0 into all three channels
    img_mask = img_mask.astype(np.uint8)  # convert the mask to uint8
    return img_src, img_mask
<!-- templates/index.html -->
<!DOCTYPE html>
<html lang="en">
<head>
    <!-- FIX: metadata was previously outside any <head>; charset declared first -->
    <meta charset="utf-8">
    <title>在线车牌识别</title>
    <meta http-equiv="Content-Security-Policy" content="upgrade-insecure-requests">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
    <link rel="icon" href="/static/images/favicon.ico" type="image/x-icon" />
    <!-- FIX: missing space between rel="stylesheet" and href -->
    <link rel="stylesheet" href="/static/iview/styles/iview.css">
    <style>
    .home-container {
        width: 600px;
        margin: 0 auto;
        margin-top: 200px;
        position: relative;
    }
    [v-cloak] {
        display: none;
    }
    </style>
</head>
<body>
<div id="app" v-cloak>
    <div class="home-container">
        <template>
            <tabs>
                <!-- FIX: tab-pane was self-closed ("/>") even though it wraps the
                     Upload component and has a matching closing tag below -->
                <tab-pane @click="tab(0)" icon="md-car" label="车牌识别">
                    <template>
                        <Upload
                            multiple
                            type="drag"
                            name="licensePlate"
                            action="/upload"
                            :show-upload-list="false"
                            :on-success="handleSuccess"
                            :format="['png','jpg','jpeg','bmp']"
                            :max-size="1024">
                            <div style="padding: 20px 0">
                                <Icon type="ios-cloud-upload" size="52" style="color: #3399ff"></Icon>
                                <p>点击或将图片拖拽到这里上传,支持格式 [jpg、bmp、png、jpeg]</p>
                            </div>
                        </Upload>
                    </template>
                </tab-pane>
            </tabs>
        </template>
        <div style="margin-top:10px" v-html="licensePlate"></div>
    </div>
</div>
<script type="text/javascript" src="/static/vue/vue.min.js"></script>
<script type="text/javascript" src="/static/iview/iview.min.js"></script>
<script>
    // Vue root instance driving the upload UI.
    var vm = new Vue({
        el: '#app',
        data: {
            licensePlate: ''
        },
        methods: {
            // iView Upload success callback: show the recognised plate or the server's error.
            handleSuccess: function (response, file, fileList) {
                if (response.status) {
                    vm.licensePlate = response.license_plate;
                    this.$Message.success('车牌号:' + vm.licensePlate);
                } else {
                    this.$Message.error(response.msg);
                }
            }
        },
        created: function () {
            this.$Notice.info({
                title: '欢迎体验在线车牌识别系统',
                desc: ''
            });
        }
    });
</script>
</body>
</html>
# car/asgi.py
"""
ASGI config for car project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# The settings module must be configured before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'car.settings')
application = get_asgi_application()
# car/settings.py
"""
Django settings for car project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed here — move it to an environment
# variable before deploying.
SECRET_KEY = 'django-insecure-tsb+rd!6hm^+wjbzzbnyuo5coczu__2g)(ok1r!4jc(%^_4b#8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header — restrict to real hostnames in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'car.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates directory (index.html lives here).
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'car.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Uploaded files larger than this many bytes are spooled to a temporary
# file instead of being held in memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024
# In-memory cap (bytes) for request data.
# NOTE(review): 1024 bytes is very small — confirm it does not reject
# legitimate non-file request payloads.
DATA_UPLOAD_MAX_MEMORY_SIZE = 1024
# car/urls.py
"""car URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# FIX: `path` and `admin` were each imported twice; duplicates removed.
from django.contrib import admin
from django.urls import path

from web import index

urlpatterns = [
    path('', index.main),  # landing page with the upload UI
    path('upload', index.upload),  # AJAX endpoint receiving the plate image
    path('admin/', admin.site.urls),
]
# car/wsgi.py
"""
WSGI config for car project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# The settings module must be configured before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'car.settings')
application = get_wsgi_application()