TensorFlow 1.x + PyQt5: item classification with a display UI and two switchable interfaces

open.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'open.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.setFixedSize(959, 750)
        MainWindow.setStyleSheet("")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.listView = QtWidgets.QListView(self.centralwidget)
        self.listView.setGeometry(QtCore.QRect(-10, -30, 971, 761))
        self.listView.setStyleSheet("border-image: url(./background.png);")
        self.listView.setObjectName("listView")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(230, 280, 461, 131))
        self.label.setObjectName("label")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 959, 26))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.type = QtWidgets.QAction(MainWindow)
        self.type.setObjectName("type")
        self.time = QtWidgets.QAction(MainWindow)
        self.time.setObjectName("time")
        self.menu.addAction(self.type)
        self.menu.addAction(self.time)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "主界面"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:18pt; font-weight:600; color:#000000;\">欢迎使用******!</span></p><p align=\"center\"><span style=\" font-size:12pt; font-weight:600; color:#000000;\">请在左上角选择功能进入</span></p></body></html>"))
        self.menu.setTitle(_translate("MainWindow", "识别方式"))
        self.type.setText(_translate("MainWindow", "识别方式1"))
        self.time.setText(_translate("MainWindow", "识别方式2"))

openwindow.py


from open import Ui_MainWindow

import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
import sys
import base64
from back import Ui_window

from PyQt5.QtWidgets import QMainWindow,QFileDialog,QApplication,QMessageBox
from PyQt5.QtGui import QIcon,QPixmap
from PyQt5.QtCore import pyqtSignal,QCoreApplication


from val import validation,validation1

from test import evaluate_one_image
from PIL import Image
import numpy as np
import json

from title_png import img as title    # import the img variable, aliased as title
from button_png import img as button
from background_png import img as background    # import the img variable, aliased as background



tmp = open('title.png', 'wb')        # create a temporary file: this simply writes title.png into the current directory, so the image can be loaded from this path later
tmp.write(base64.b64decode(title))    # decode the image and write it into the file
tmp.close()
tmp1 = open('button.png', 'wb')
tmp1.write(base64.b64decode(button))
tmp1.close()
tmp2 = open('background.png', 'wb')
tmp2.write(base64.b64decode(background))
tmp2.close()
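
# A small helper (hypothetical, not part of the original code) would do the same
# decode-and-write step with a context manager, so the file handle is closed even on error:
#
#     def dump_embedded_png(b64_data, path):
#         with open(path, 'wb') as f:
#             f.write(base64.b64decode(b64_data))
#
#     for data, path in [(title, 'title.png'), (button, 'button.png'), (background, 'background.png')]:
#         dump_embedded_png(data, path)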



class MyMainWindow(QMainWindow, Ui_window):
    _signal = pyqtSignal(str)
    # _signal_times = pyqtSignal(int)
    def __init__(self):
        super(MyMainWindow, self).__init__()
        _translate = QCoreApplication.translate
        self.setupUi(self)
        self.setWindowTitle("功能界面")
        self.setWindowIcon(QIcon('title.png'))
        self.label_xietitle.setText(_translate("window","<html><head/><body><p align=\"center\"><span style=\" font-size:24pt; font-weight:600; color:#ff5500;\">油液磨粒自动识别系统</span><span style=\" font-size:18pt; font-weight:600; color:#ff0000; vertical-align:sub;\">(类别)</span></p></body></html>"))

        self.directory = ''
        self.num_classes = ''
        self.savepath = ''
        self.lineEdit.setPlaceholderText("650")
        self.learningtimes = 650
        self.logfile = ''
        self.classesa = ''
        self.classesb = ''
        self.lists = []
        self.validation_accuracy = ''
        self.valiadation_path = ''

        self.select_train.clicked.connect(self.OpenTrainPath)
        self.start_train.clicked.connect(self.StartTrain)
        self.train_saver.clicked.connect(self.SavePath)
        self.test_one.clicked.connect(self.OpenPicture)
        self.load_saver.clicked.connect(self.LoadTrainFile)
        self._signal.connect(self.ShowAccuracy)
        # self._signal_times.connect(self.getLearningTimes)
        self.lineEdit.returnPressed.connect(self.GetTimes)
        # self.lineEdit_2.returnPressed.connect(self.GetNumClasses)
        self.load_classes.clicked.connect(self.GetClasses)
        self.load_val.clicked.connect(self.Validation)
        self.back.clicked.connect(self.jump_to_first)


    def OpenTrainPath(self):
        self.directory = QFileDialog.getExistingDirectory(self, "选择文件夹")
        # print(self.directory)
        if len(self.directory) != 0:
            self.select_train.setStyleSheet("border-image: url(./button.png);color:blue")
            self.num_classes = len(os.listdir(self.directory))
            for classes_list in os.listdir(self.directory):
                self.lists.append(classes_list)
            # print(self.lists)
            c_list = json.dumps(self.lists)
            list_save_path = self.directory + '.txt'
            with open(list_save_path, 'w') as f:
                f.write(c_list)
        # self.num_classes, _ = enumerate(os.listdir(self.directory))

    def SavePath(self):
        self.savepath = QFileDialog.getExistingDirectory(self, "选择文件夹")
        if len(self.savepath) != 0:
            self.train_saver.setStyleSheet("border-image: url(./button.png);color:blue")

    def GetTimes(self):
        # self.label_7.setText(self.lineEdit.text())
        self.learningtimes = int(self.lineEdit.text())

    def StartTrain(self):
        if self.directory != '' and self.savepath != '':
            self.start_train.setStyleSheet("border-image: url(./button.png);color:blue")
            self.label7.setText("Start Training")
            # print(self.directory)
            # print(self.num_classes)
            # print(self.learningtimes)
            self.train(directory=self.directory, num_classes=self.num_classes, save_path=self.savepath,
                       max_step=self.learningtimes)
        else:
            self.label7.setText("请选择训练集和保存地址")

    def ShowAccuracy(self, str):
        self.label7.setText(str)

    def train(self, directory, num_classes, save_path, max_step):
        import os
        import numpy as np
        import tensorflow as tf
        from PreWork import get_files, get_batch
        from Cnn2 import deep_CNN, losses, training, evaluation

        # Variable declarations
        N_CLASSES = num_classes  # e.g. a, b, c, d
        IMG_W = 128
        IMG_H = 128
        BATCH_SIZE = 32  # images per batch
        CAPACITY = 200  # maximum queue capacity
        MAX_STEP = max_step  # usually more than 10K
        learning_rate = 1e-3

        # Build the batches
        train_dir = directory
        logs_train_dir = save_path
        train, train_label, val, val_label = get_files(train_dir, 0.3)  # 30% held out for validation

        # Training data and labels
        train_batch, train_label_batch = get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

        # Test data and labels
        val_batch, val_label_batch = get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

        # Training ops
        train_logits = deep_CNN(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = losses(train_logits, train_label_batch)
        train_op = training(train_loss, learning_rate)
        train_acc = evaluation(train_logits, train_label_batch)


        # Test ops
        test_logits = deep_CNN(val_batch, BATCH_SIZE, N_CLASSES)
        test_loss = losses(test_logits, val_label_batch)
        test_op = training(test_loss, learning_rate)
        test_acc = evaluation(test_logits, val_label_batch)

        # Merge all summaries for logging
        summary_op = tf.compat.v1.summary.merge_all()

        # Create a session
        sess = tf.compat.v1.Session()
        # Create a writer for the log files
        train_writer = tf.compat.v1.summary.FileWriter(logs_train_dir, sess.graph)
        # val_writer = tf.compat.v1.summary.FileWriter(logs_test_dir, sess.graph)
        saver = tf.compat.v1.train.Saver()
        # Initialize all variables
        sess.run(tf.compat.v1.global_variables_initializer())
        # Queue coordination
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # Train batch by batch
        try:
            # Run MAX_STEP training steps, one batch per step
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                # Run the following op nodes

                _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
                _, Test_Loss, Test_Acc = sess.run([test_op, test_loss, test_acc])

                # Every 26 steps, print the current loss and acc, and write a summary to the log writer
                if (step+1) % 26 == 0:
                    print('step %d,train loss =    %.2f,train accuracy =    %.2f%%' % (step + 1, tra_loss, tra_acc * 100))
                    print('step %d,test loss = %.2f,test accuracy = %.2f%%' % (step + 1, Test_Loss, Test_Acc * 100))
                    self._signal.emit('step %d,train loss =    %.2f,train accuracy =    %.2f%%' % (step + 1, tra_loss, tra_acc * 100))
                    self._signal.emit('step %d,test loss = %.2f,test accuracy = %.2f%%' % (step + 1, Test_Loss, Test_Acc * 100))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)
                # Save the trained model once, when training reaches the final step
                if (step + 1) == MAX_STEP:
                    self._signal.emit("training finished")
                    checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
                    QApplication.processEvents()      # lets the UI refresh while this long-running work is in progress

        except tf.errors.OutOfRangeError:
            self._signal.emit('Done training -- epoch limit reached')

        finally:
            coord.request_stop()

        coord.join(threads)  # join the started threads back into the main thread and wait for them to finish
        sess.close()


    def LoadTrainFile(self):
        self.logfile = QFileDialog.getExistingDirectory(self, "选择文件夹")
        if len(self.logfile) != 0:
            self.load_saver.setStyleSheet("border-image: url(./button.png);color:blue")


    def OpenPicture(self):

        fname, _ = QFileDialog.getOpenFileName(self, "选择图片", " ", "Image files(*.jpg *.bmp *.*)")
        if len(fname) != 0:
            self.test_one.setStyleSheet("border-image: url(./button.png);color:blue")
            img = QPixmap(fname).scaled(self.label3.width(), self.label3.height())
            self.label3.setPixmap(img)
            image = Image.open(fname)
            # image = tf.cast(image, tf.float32)
            image = np.array(image.resize([128, 128]))
            if self.lists == []:
                self.label5.setText("请加载类别文件")
            else:
                self.classesa, self.classesb = evaluate_one_image(image_array=image, lists=self.lists, log_dir=self.logfile, N_CLASSES=self.num_classes)
                a = str(self.classesa)
                b = str(self.classesb)
                c = "类别为:" + a + b
                self.label5.setText(c)        # show the label with the highest probability
                self.label5.setStyleSheet("border-image: url(./button.png);color:#b50000")





    def GetClasses(self):
        fname, _ = QFileDialog.getOpenFileName(self, "选择文件", " ", "TXT(*.txt)")
        if len(fname) != 0:
            self.load_classes.setStyleSheet("border-image: url(./button.png);color:blue")
            with open(fname, 'r') as temp:
                self.lists = json.loads(temp.read())     # json.load() reads from a file object; json.loads() parses a string
            self.num_classes = len(self.lists)

    def Validation(self):
        self.label7.setText("正在验证,请稍等")
        fname = QFileDialog.getExistingDirectory(self, "选择文件夹")
        # print(fname)
        if len(fname) != 0:
            self.load_val.setStyleSheet("border-image: url(./button.png);color:blue")
            self.num_classes = len(os.listdir(fname))
            # print(self.num_classes)
            # print(self.savepath)
            self.validation_accuracy = validation(fname, self.logfile, self.num_classes)
            accuracy = '验证准确率为:' + str(self.validation_accuracy * 100) + '%'
            self.label7.setText(accuracy)
    def jump_to_first(self):
        self.close()
        self.s = MainWindow()
        self.s.show()


class MyMainWindow1(QMainWindow, Ui_window):
    _signal = pyqtSignal(str)
    # _signal_times = pyqtSignal(int)
    def __init__(self):
        super(MyMainWindow1, self).__init__()
        _translate = QCoreApplication.translate
        self.setupUi(self)
        self.setWindowTitle("功能界面")
        self.setWindowIcon(QIcon('title.png'))
        self.label_xietitle.setText(_translate("window", "<html><head/><body><p align=\"center\"><span style=\" font-size:24pt; font-weight:600; color:#ff5500;\">油液磨粒自动识别系统</span><span style=\" font-size:18pt; font-weight:600; color:#ff0000; vertical-align:sub;\">(程度)</span></p></body></html>"))

        self.directory = ''
        self.num_classes = ''
        self.savepath = ''
        self.lineEdit.setPlaceholderText("650")
        self.learningtimes = 650
        self.logfile = ''
        self.classesa = ''
        self.classesb = ''
        self.lists = []
        self.validation_accuracy = ''
        self.valiadation_path = ''

        self.select_train.clicked.connect(self.OpenTrainPath)
        self.start_train.clicked.connect(self.StartTrain)
        self.train_saver.clicked.connect(self.SavePath)
        self.test_one.clicked.connect(self.OpenPicture)
        self.load_saver.clicked.connect(self.LoadTrainFile)
        self._signal.connect(self.ShowAccuracy)
        # self._signal_times.connect(self.getLearningTimes)
        self.lineEdit.returnPressed.connect(self.GetTimes)
        # self.lineEdit_2.returnPressed.connect(self.GetNumClasses)
        self.load_classes.clicked.connect(self.GetClasses)
        self.load_val.clicked.connect(self.Validation1)
        self.back.clicked.connect(self.jump_to_first)


    def OpenTrainPath(self):
        self.directory = QFileDialog.getExistingDirectory(self, "选择文件夹")
        # print(self.directory)
        if len(self.directory) != 0:
            self.select_train.setStyleSheet("border-image: url(./button.png);color:blue")
            self.num_classes = len(os.listdir(self.directory))
            for classes_list in os.listdir(self.directory):
                self.lists.append(classes_list)
            # print(self.lists)
            c_list = json.dumps(self.lists)
            list_save_path = self.directory + '.txt'
            with open(list_save_path, 'w') as f:
                f.write(c_list)
        # self.num_classes, _ = enumerate(os.listdir(self.directory))

    def SavePath(self):
        self.savepath = QFileDialog.getExistingDirectory(self, "选择文件夹")
        if len(self.savepath) != 0:
            self.train_saver.setStyleSheet("border-image: url(./button.png);color:blue")

    def GetTimes(self):
        # self.label_7.setText(self.lineEdit.text())
        self.learningtimes = int(self.lineEdit.text())

    def StartTrain(self):
        if self.directory != '' and self.savepath != '':
            self.start_train.setStyleSheet("border-image: url(./button.png);color:blue")
            self.label7.setText("Start Training")
            # print(self.directory)
            # print(self.num_classes)
            # print(self.learningtimes)
            self.train1(directory=self.directory, num_classes=self.num_classes, save_path=self.savepath,
                       max_step=self.learningtimes)
        else:
            self.label7.setText("请选择训练集和保存地址")

    def ShowAccuracy(self, str):
        self.label7.setText(str)


    def train1(self, directory, num_classes, save_path, max_step):
        import os
        import numpy as np
        import tensorflow as tf
        from PreWork import get_files1, get_batch
        from Cnn2 import deep_CNN, losses, training, evaluation

        # Variable declarations
        N_CLASSES = num_classes  # e.g. a, b, c
        IMG_W = 128
        IMG_H = 128
        BATCH_SIZE = 32  # images per batch
        CAPACITY = 200  # maximum queue capacity
        MAX_STEP = max_step  # usually more than 10K
        learning_rate = 1e-3

        # Build the batches
        train_dir = directory
        logs_train_dir = save_path
        train, train_label, val, val_label = get_files1(train_dir, 0.3)  # 30% held out for validation

        # Training data and labels
        train_batch, train_label_batch = get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

        # Test data and labels
        val_batch, val_label_batch = get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

        # Training ops
        train_logits = deep_CNN(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = losses(train_logits, train_label_batch)
        train_op = training(train_loss, learning_rate)
        train_acc = evaluation(train_logits, train_label_batch)


        # Test ops
        test_logits = deep_CNN(val_batch, BATCH_SIZE, N_CLASSES)
        test_loss = losses(test_logits, val_label_batch)
        test_op = training(test_loss, learning_rate)
        test_acc = evaluation(test_logits, val_label_batch)

        # Merge all summaries for logging
        summary_op = tf.compat.v1.summary.merge_all()

        # Create a session
        sess = tf.compat.v1.Session()
        # Create a writer for the log files
        train_writer = tf.compat.v1.summary.FileWriter(logs_train_dir, sess.graph)
        # val_writer = tf.compat.v1.summary.FileWriter(logs_test_dir, sess.graph)
        saver = tf.compat.v1.train.Saver()
        # Initialize all variables
        sess.run(tf.compat.v1.global_variables_initializer())
        # Queue coordination
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Train batch by batch
        try:
            # Run MAX_STEP training steps, one batch per step
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                # Run the following op nodes

                _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
                _, Test_Loss, Test_Acc = sess.run([test_op, test_loss, test_acc])

                # Every 26 steps, print the current loss and acc, and write a summary to the log writer
                if (step+1) % 26 == 0:
                    print('step %d,train loss =    %.2f,train accuracy =    %.2f%%' % (step + 1, tra_loss, tra_acc * 100))
                    print('step %d,test loss = %.2f,test accuracy = %.2f%%' % (step + 1, Test_Loss, Test_Acc * 100))
                    self._signal.emit('step %d,train loss =    %.2f,train accuracy =    %.2f%%' % (step + 1, tra_loss, tra_acc * 100))
                    self._signal.emit('step %d,test loss = %.2f,test accuracy = %.2f%%' % (step + 1, Test_Loss, Test_Acc * 100))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)
                # Save the trained model once, when training reaches the final step
                if (step + 1) == MAX_STEP:
                    self._signal.emit("training finished")
                    checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
                    QApplication.processEvents()      # lets the UI refresh while this long-running work is in progress

        except tf.errors.OutOfRangeError:
            self._signal.emit('Done training -- epoch limit reached')

        finally:
            coord.request_stop()

        coord.join(threads)  # join the started threads back into the main thread and wait for them to finish
        sess.close()

    def LoadTrainFile(self):
        self.logfile = QFileDialog.getExistingDirectory(self, "选择文件夹")
        if len(self.logfile) != 0:
            self.load_saver.setStyleSheet("border-image: url(./button.png);color:blue")


    def OpenPicture(self):

        fname, _ = QFileDialog.getOpenFileName(self, "选择图片", " ", "Image files(*.jpg *.bmp *.*)")
        if len(fname) != 0:
            self.test_one.setStyleSheet("border-image: url(./button.png);color:blue")
            img = QPixmap(fname).scaled(self.label3.width(), self.label3.height())
            self.label3.setPixmap(img)
            image = Image.open(fname)
            # image = tf.cast(image, tf.float32)
            image = np.array(image.resize([128, 128]))
            if self.lists == []:
                self.label5.setText("请加载类别文件")
            else:
                self.classesa, self.classesb = evaluate_one_image(image_array=image, lists=self.lists, log_dir=self.logfile, N_CLASSES=self.num_classes)
                a = str(self.classesa)
                b = str(self.classesb)
                c = "类别为:" + a + b
                self.label5.setText(c)        # show the label with the highest probability
                self.label5.setStyleSheet("border-image: url(./button.png);color:#b50000")





    def GetClasses(self):
        fname, _ = QFileDialog.getOpenFileName(self, "选择文件", " ", "TXT(*.txt)")
        if len(fname) != 0:
            self.load_classes.setStyleSheet("border-image: url(./button.png);color:blue")
            with open(fname, 'r') as temp:
                self.lists = json.loads(temp.read())     # json.load() reads from a file object; json.loads() parses a string
            self.num_classes = len(self.lists)

    def Validation1(self):
        self.label7.setText("正在验证,请稍等")
        fname = QFileDialog.getExistingDirectory(self, "选择文件夹")
        # print(fname)
        if len(fname) != 0:
            self.load_val.setStyleSheet("border-image: url(./button.png);color:blue")
            self.num_classes = len(os.listdir(fname))
            # print(self.num_classes)
            # print(self.savepath)
            self.validation_accuracy = validation1(fname, self.logfile, self.num_classes)
            accuracy = '验证准确率为:' + str(self.validation_accuracy * 100) + '%'
            self.label7.setText(accuracy)
    def jump_to_first(self):
        self.close()
        self.s = MainWindow()
        self.s.show()


class MainWindow(QMainWindow,Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()  # call the parent class __init__
        self.setWindowIcon(QIcon('title.png'))
        self.setupUi(self)


        self.type.triggered.connect(self.jump_to_1)
        self.time.triggered.connect(self.jump_to_2)


    def jump_to_1(self):
        self.hide()
        self.s = MyMainWindow()
        self.s.show()

    def jump_to_2(self):
        self.hide()
        self.s = MyMainWindow1()
        self.s.show()

    def closeEvent(self, event):
        reply = QMessageBox.question(self, '提醒',
                                     "确定退出?", QMessageBox.Yes |
                                     QMessageBox.No, QMessageBox.No)

        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()


if __name__ == '__main__':

    app = QApplication(sys.argv)
    myMainWindow = MainWindow()  # instantiate the custom window class
    myMainWindow.show()

    sys.exit(app.exec_())
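
Note that train() runs on the GUI thread, so apart from the QApplication.processEvents() call the window freezes during training. A minimal sketch of moving the work onto a QThread instead (TrainWorker and its wiring are assumptions, not part of the original code; the existing _signal/ShowAccuracy pair keeps receiving the progress strings, since cross-thread emits are queued by Qt):

from PyQt5.QtCore import QThread

class TrainWorker(QThread):
    def __init__(self, window, parent=None):
        super(TrainWorker, self).__init__(parent)
        self.window = window    # borrow the parameters collected by the UI

    def run(self):
        # executes off the GUI thread; the strings emitted inside train()
        # reach ShowAccuracy through Qt's queued signal delivery
        w = self.window
        w.train(directory=w.directory, num_classes=w.num_classes,
                save_path=w.savepath, max_step=w.learningtimes)

# sketch of the wiring inside MyMainWindow.StartTrain:
#     self.worker = TrainWorker(self)
#     self.worker.start()    # returns immediately; the UI stays responsive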

PreWork.py

import os
import numpy as np
import math
import tensorflow as tf




train_dir = r'(training set path (absolute path))'



# Step 1: collect the image paths produced by running Image_to_tfrecords.py.
    # Gather every image path under the directory into the corresponding list,
    # and append the matching label to the label list.
def get_files(file_dir, ratio):
    # local lists, so repeated calls do not accumulate stale paths
    a, label_a = [], []
    b, label_b = [], []
    c, label_c = [], []
    d, label_d = [], []

    for file in os.listdir(file_dir + '/分类1'):  # these must be the class folder names you created, or the files cannot be read
        a.append(file_dir + '/分类1' + '/' + file)
        label_a.append(0)
    for file in os.listdir(file_dir + '/分类2'):
        b.append(file_dir + '/分类2' + '/' + file)
        label_b.append(1)
    for file in os.listdir(file_dir + '/分类3'):
        c.append(file_dir + '/分类3' + '/' + file)
        label_c.append(2)
    for file in os.listdir(file_dir + '/分类4'):
        d.append(file_dir + '/分类4' + '/' + file)
        label_d.append(3)


    # Print how many images were collected, to verify the extraction
    print("There are %d 分类1\nThere are %d 分类2\nThere are %d 分类3\nThere are %d 分类4\n" % (len(a), len(b), len(c), len(d)), end="")

# Step 2: shuffle the generated path and label lists; merge a, b, c, d into a single list (img and lab)
    # numpy.hstack(tup) merges data:
    # tup can be a Python tuple, a list, or a numpy array;
    # the function stacks tup horizontally (column-wise)
    image_list = np.hstack((a, b, c, d))
    label_list = np.hstack((label_a, label_b, label_c, label_d))

    # shuffle: build a matrix, transpose, permute the rows
    temp = np.array([image_list, label_list])    # convert to a 2-D matrix
    temp = temp.transpose()     # transpose
    np.random.shuffle(temp)     # shuffles the rows in place


    # convert all imgs and labs back into lists
    all_image_list = list(temp[:, 0])    # column 0: image paths
    all_label_list = list(temp[:, 1])    # column 1: image labels

    # Split the lists in two: one part for training, one part for validation (val)
    # ratio is the validation fraction
    n_sample = len(all_label_list)
    n_val = int(math.ceil(n_sample * ratio))  # number of validation samples
    n_train = n_sample - n_val    # number of training samples

    tra_images = all_image_list[0:n_train]
    tra_labels = all_label_list[0:n_train]

    tra_labels = [int(float(i)) for i in tra_labels]

    val_images = all_image_list[n_train:]    # note: [n_train:-1] would drop the last sample
    val_labels = all_label_list[n_train:]

    val_labels = [int(float(i)) for i in val_labels]


    return tra_images,tra_labels,val_images,val_labels


def get_files1(file_dir, ratio):
    # local lists, so repeated calls do not accumulate stale paths
    a, label_a = [], []
    b, label_b = [], []
    c, label_c = [], []

    for file in os.listdir(file_dir + '/分类1'):  # these must be the class folder names you created, or the files cannot be read
        a.append(file_dir + '/分类1' + '/' + file)
        label_a.append(0)
    for file in os.listdir(file_dir + '/分类2'):
        b.append(file_dir + '/分类2' + '/' + file)
        label_b.append(1)
    for file in os.listdir(file_dir + '/分类3'):
        c.append(file_dir + '/分类3' + '/' + file)
        label_c.append(2)



    # Print how many images were collected, to verify the extraction
    print("There are %d 分类1\nThere are %d 分类2\nThere are %d 分类3\n" % (len(a), len(b), len(c)), end="")

# Step 2: shuffle the generated path and label lists; merge a, b, c into a single list (img and lab)
    # numpy.hstack(tup) merges data:
    # tup can be a Python tuple, a list, or a numpy array;
    # the function stacks tup horizontally (column-wise)
    image_list = np.hstack((a, b, c))
    label_list = np.hstack((label_a, label_b, label_c))

    # shuffle: build a matrix, transpose, permute the rows
    temp = np.array([image_list, label_list])    # convert to a 2-D matrix
    temp = temp.transpose()     # transpose
    np.random.shuffle(temp)     # shuffles the rows in place

    # convert all imgs and labs back into lists
    all_image_list = list(temp[:, 0])    # column 0: image paths
    all_label_list = list(temp[:, 1])    # column 1: image labels

    # Split the lists in two: one part for training, one part for validation (val)
    # ratio is the validation fraction
    n_sample = len(all_label_list)
    n_val = int(math.ceil(n_sample * ratio))  # number of validation samples
    n_train = n_sample - n_val    # number of training samples

    tra_images = all_image_list[0:n_train]
    tra_labels = all_label_list[0:n_train]

    tra_labels = [int(float(i)) for i in tra_labels]

    val_images = all_image_list[n_train:]    # note: [n_train:-1] would drop the last sample
    val_labels = all_label_list[n_train:]

    val_labels = [int(float(i)) for i in val_labels]


    return tra_images,tra_labels,val_images,val_labels

#-------------------------- Batch generation ------------------------

# Step 1: pass the lists generated above into get_batch(), convert their types, and create an input queue.
# Because img and lab are separate lists, use tf.train.slice_input_producer(), then read the image
# from the queue with tf.io.read_file().
#   image_W, image_H: the fixed image width and height
#   batch_size: images per batch
#   capacity: maximum queue capacity

def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # convert types with tf.cast()
    image = tf.cast(image, tf.string)    # variable-length byte arrays: every tensor element is a byte array
    label = tf.cast(label, tf.int32)

#    print(label)
    # tf.train.slice_input_producer is a tensor generator:
    # it repeatedly takes one tensor from a tensor list, in order or at random, and puts it into the filename queue.
    # make an input queue
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.io.read_file(input_queue[0])   # read the image from the queue

    # Step 2: decode the images. Use a single image type: do not mix formats, use only JPEG or only PNG, etc.
#    image = tf.image.decode_image(image_contents, channels=3)
    image = tf.image.decode_png(image_contents, channels=3)
    '''change the decoder to match the source image format'''
    # jpeg and jpg both use decode_jpeg; see the official docs for other formats

    # Step 3: preprocessing: rotate, scale, crop, normalize, etc., to make the learned features more robust
    image = tf.image.resize_with_crop_or_pad(image, image_W, image_H)
    image = tf.image.per_image_standardization(image)   # standardize the resized image



    image_batch, label_batch = tf.train.shuffle_batch([image, label],
                                                      batch_size=batch_size,
                                                      num_threads=32,
                                                      capacity=capacity,
                                                      min_after_dequeue=100)
    # reshape label so its shape is [batch_size]
    label_batch = tf.reshape(label_batch, [batch_size])

    image_batch = tf.cast(image_batch, tf.float32)

    return image_batch, label_batch
    # these two batches are the data fed into the network
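
A quick way to sanity-check the queue pipeline is to pull one batch through a session. This is a hypothetical smoke test, not part of the original project; E:/data/train_set is a placeholder path and must contain the 分类1..分类4 folders described above:

import tensorflow as tf
from PreWork import get_files, get_batch

if __name__ == '__main__':
    train, train_label, val, val_label = get_files(r'E:/data/train_set', 0.3)
    image_batch, label_batch = get_batch(train, train_label, 128, 128, 32, 200)

    with tf.compat.v1.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        imgs, labs = sess.run([image_batch, label_batch])
        print(imgs.shape, labs)    # expected: (32, 128, 128, 3) and 32 integer labels
        coord.request_stop()
        coord.join(threads)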


Cnn2.py

import tensorflow as tf

'''
    The network consists of:
        conv + pooling layers: 3
        fully connected layers: 2
        activation: ReLU
        dropout and a classifier
'''
'''
In TensorFlow, defining and initializing a variable are separate steps; all assignment to and
computation on graph variables goes through tf.Session.run.

To initialize all graph variables at once, use tf.global_variables_initializer.
tf.placeholder vs. tf.Variable:
    tf.placeholder: a placeholder,
        mainly for real input data and output labels; used for values fed through feed_dict,
        it needs no initial value since the concrete value is supplied via feed_dict.
    tf.Variable: mainly for trainable, changing values such as weights and biases; an initial value is required.
        Once the Variable() constructor runs, the variable's type and shape are fixed, but its value can be changed with assign.

tf.get_variable vs. tf.Variable:
Similarities: both create variables in essentially the same way;
        the shape and initializer arguments of tf.get_variable are similar to the initial-value arguments of tf.Variable.
Differences: they specify the variable name differently:
        for tf.Variable the name is optional, given as name="v";
        for tf.get_variable the name is required, and the variable is created or retrieved by that name.
'''
# batch_size = 20
# x = tf.placeholder(tf.float32, [batch_size,224,224,3])
# y = tf.placeholder(tf.float32, [batch_size,5])
# Function declarations
'''
def weight_variable(shape, n):
    # tf.truncated_normal(shape, mean, stddev) draws from a truncated normal distribution
    # with the given mean and standard deviation; shape is the output tensor's shape,
    # and values further than two stddevs from the mean are re-drawn
    initial = tf.truncated_normal(shape, stddev=n, dtype=tf.float32)
    return initial


def bias_variable(shape):
    # create an array of the given shape with every value initialized to 0.1
    initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
    return initial


def conv2d(x, w):
    # convolution with stride 1 in every direction; SAME: zero-pad beyond the borders
    # padding generally takes one of two values
    # output size of a conv layer: floor((W + 2P - f) / stride) + 1
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')  # [batch, height, width, channels]
    # strides[0] = 1: stride 1 along the batch dimension, i.e. no sample is skipped (they were given as input for a reason)
    # strides[3] = 1: stride 1 along the channels dimension, i.e. no color channel is skipped


def max_pooling(x, name):  # 2x2
    # pool the conv2d output with a 3x3 kernel and stride 2; SAME: zero-pad; take the maximum. Shrinks the data 4x
    # x is the output of the convolution step; its shape must be [batch, height, width, channels]
    # ksize is the pooling window size, shape [batch, height, width, channels]
    # strides is the stride, usually [1, stride, stride, 1]
    # output size of a pooling layer: ceil((W - f) / stride) + 1
    return tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
'''

# A simple CNN: (convolution + pooling) x 3, two fully connected layers, and a final softmax layer for classification
# conv1: 16 3x3 kernels (3 input channels), padding='SAME' so the output keeps the input's spatial size, ReLU activation
def deep_CNN(images, batch_size, n_classes):
    with tf.compat.v1.variable_scope('conv1') as scope:
        weights = tf.Variable(tf.random.truncated_normal(shape=[3, 3, 3, 16], stddev=1.0, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[16]),
                             name='biases', dtype=tf.float32)

        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

        # Pooling layer 1
        # 3x3 max pooling with stride 2, followed by lrn() (local response normalization), which helps training.
    with tf.compat.v1.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool2d(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
        # conv2
    with tf.compat.v1.variable_scope('conv2') as scope:
        weights = tf.Variable(tf.random.truncated_normal(shape=[3, 3, 16, 32], stddev=1.0, dtype=tf.float32),
                                  name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[32]),
                                 name='biases', dtype=tf.float32)

        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)

        # Pooling layer 2
        # 3x3 max pooling with stride 2, followed by lrn() (Local Response Normalization), which helps training.
    with tf.compat.v1.variable_scope('pooling2_lrn') as scope:
        pool2 = tf.nn.max_pool2d(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling2')
        norm2 = tf.nn.lrn(pool2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')




        # Convolution layer 3
        # 64 3x3 kernels (32 input channels), padding='SAME' so the output keeps the input's spatial size, ReLU activation
    with tf.compat.v1.variable_scope('conv3') as scope:
        weights = tf.Variable(tf.random.truncated_normal(shape=[3, 3, 32, 64], stddev=0.1, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[64]),
                             name='biases', dtype=tf.float32)

        conv = tf.nn.conv2d(norm2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')

        # Pooling layer 3
        # lrn() first, then 3x3 max pooling with stride 1
    with tf.compat.v1.variable_scope('pooling3_lrn') as scope:
        norm3 = tf.nn.lrn(conv3, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm3')
        pool3 = tf.nn.max_pool2d(norm3, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling3')


        # Fully connected layer 4
        # 256 neurons; reshape the previous pooling output into one row, ReLU activation
    with tf.compat.v1.variable_scope('local4') as scope:
        reshape = tf.reshape(pool3, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.Variable(tf.random.truncated_normal(shape=[dim, 256], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[256]),
                             name='biases', dtype=tf.float32)

        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

        # Fully connected layer 5
        # 256 neurons, ReLU activation
    with tf.compat.v1.variable_scope('local5') as scope:
        weights = tf.Variable(tf.random.truncated_normal(shape=[256, 256], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[256]),
                             name='biases', dtype=tf.float32)

        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

        # Softmax regression layer
        # apply a linear map to the FC output to compute a score for each class
    with tf.compat.v1.variable_scope('softmax_linear') as scope:
        weights = tf.Variable(tf.random.truncated_normal(shape=[256, n_classes], stddev=0.005, dtype=tf.float32),
                              name='softmax_linear', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[n_classes]),
                             name='biases', dtype=tf.float32)

        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')

    return softmax_linear


# Loss computation
# Inputs: logits, the network's output values; labels, the ground truth (here 0, 1, 2 or 3)
# Returns: loss, the loss value
def losses(logits, labels):

    with tf.compat.v1.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels,
                                                                       name='xentropy_per_example')

        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.compat.v1.summary.scalar(scope.name + '/loss', loss)
    return loss


# Loss optimization

def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


# Evaluation / accuracy computation
def evaluation(logits, labels):
    with tf.compat.v1.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct)
        tf.compat.v1.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
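
Before wiring the network into the UI, the layer shapes can be checked by driving the graph with random data. A hypothetical smoke test, assuming 4 classes and the 128x128 input used elsewhere in this project:

import numpy as np
import tensorflow as tf
from Cnn2 import deep_CNN, losses, training, evaluation

if __name__ == '__main__':
    images = tf.constant(np.random.rand(32, 128, 128, 3), dtype=tf.float32)
    labels = tf.constant(np.random.randint(0, 4, size=32), dtype=tf.int32)

    logits = deep_CNN(images, batch_size=32, n_classes=4)
    loss = losses(logits, labels)
    train_op = training(loss, 1e-3)
    acc = evaluation(logits, labels)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        _, loss_val, acc_val = sess.run([train_op, loss, acc])
        print(logits.shape, loss_val, acc_val)    # logits shape should be (32, 4)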

val.py

import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
from PreWork import get_files, get_files1, get_batch
from Cnn2 import deep_CNN, evaluation
import tensorflow as tf


BATCH_SIZE = 20

IMG_W = 128
IMG_H = 128
CAPACITY = 200



def validation(train_dir, log_dir, N_CLASS):
    BATCH_SIZE = 5
    IMG_W = 128
    IMG_H = 128
    CAPACITY = 200
    with tf.Graph().as_default():

        train, train_label, val, val_label = get_files(train_dir, 0.3)
        val_batch, val_label_batch = get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

        p = deep_CNN(val_batch, BATCH_SIZE, N_CLASS)

        validation_acc = evaluation(p, val_label_batch)

        '''Load the trained checkpoint'''
        saver = tf.compat.v1.train.Saver()
        sess = tf.compat.v1.Session()
        ckpt = tf.train.get_checkpoint_state(log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        '''Start the threads and the queues'''
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        total_acc = 0
        for _ in range(100):
            acc = sess.run(validation_acc)
            total_acc += acc

        coord.request_stop()
        coord.join(threads)
        sess.close()

    return total_acc / 100




def validation1(train_dir, log_dir, N_CLASS):
    BATCH_SIZE = 5
    IMG_W = 128
    IMG_H = 128
    CAPACITY = 200
    with tf.Graph().as_default():

        train, train_label, val, val_label = get_files1(train_dir, 0.3)
        val_batch, val_label_batch = get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

        p = deep_CNN(val_batch, BATCH_SIZE, N_CLASS)

        validation_acc = evaluation(p, val_label_batch)

        '''Load the trained checkpoint'''
        saver = tf.compat.v1.train.Saver()
        sess = tf.compat.v1.Session()
        ckpt = tf.train.get_checkpoint_state(log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        '''Start the threads and the queues'''
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        total_acc = 0
        for _ in range(100):
            acc = sess.run(validation_acc)
            total_acc += acc

        coord.request_stop()
        coord.join(threads)
        sess.close()

    return total_acc / 100

# if __name__ == '__main__':
#     # validation_dir = "G:/PyProject/20190715/17flowers/flowers"
#     # log_dir = 'G:/PyProject/20190715/17flowers'
#     # train_dir = r"E:\data/generate_self"
#     # log_dir = r'E:\data\ck'
#     # N_CLASS = 4
#     # train_dir = r'E:\window\graph'
#     # log_dir = r'E:\window\cknew'
#     acc = validation()
#     print(acc)
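
A hypothetical call, with placeholder paths for the validation-set folder and the checkpoint folder that the UI normally supplies:

if __name__ == '__main__':
    acc = validation(r'E:\data\val_set', r'E:\data\ck', 4)    # placeholder paths
    print('validation accuracy: %.2f%%' % (acc * 100))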

test.py

#=============================================================================
from PIL import Image
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from Cnn2 import deep_CNN


#=======================================================================
# Get one image
def get_one_image(train):
    # Input: train, the list of training image paths
    # Returns: image, one image drawn at random from the training images
    n = len(train)
    print(n)
    ind = np.random.randint(0, n)
    img_dir = train[ind]   # randomly choose an image to test

    img = Image.open(img_dir)
    plt.imshow(img)
    plt.show()
    imag = img.resize([32, 32])  # the images are already resized during preprocessing, so this step can be skipped
                                # it used to be resize([256, 256]), which did not match the trained network's parameters;
                                # changing [256, 256] to [32, 32] throughout the code made it run correctly
    image = np.array(imag)
    return image

#--------------------------------------------------------------------
# Test an image

def evaluate_one_image(image_array, lists, log_dir, N_CLASSES):
    with tf.Graph().as_default():
       BATCH_SIZE = 1


       image = tf.cast(image_array, tf.float32)
       image = tf.image.per_image_standardization(image)
       #print(str(image))
       image = tf.reshape(image, [1, 128, 128, 3])

       logit = deep_CNN(image,BATCH_SIZE,N_CLASSES)

       logit = tf.nn.softmax(logit)





       saver = tf.compat.v1.train.Saver()

       with tf.compat.v1.Session() as sess:
           tf.compat.v1.global_variables_initializer().run()

           print("Reading checkpoints...")
           ckpt = tf.train.get_checkpoint_state(log_dir)     # log_dir is selected manually in the main window
           if ckpt and ckpt.model_checkpoint_path:
               global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
               print(global_step)
               '''this step is error-prone'''
               saver.restore(sess, ckpt.model_checkpoint_path)
               print('Loading success, global_step is %s' % global_step)
           else:
               print('No checkpoint file found')

           prediction = sess.run(logit)
           max_index = np.argmax(prediction)    # numpy.argmax(array, axis) returns the index of the maximum value in a numpy array;
                                                # if the maximum occurs more than once, the index of its first occurrence is returned

           print('预测的标签为:' + str(max_index) + ' ' + str(lists[max_index]))
           print('预测的准确率为:', prediction)
           n = int(str(max_index))
           m = N_CLASSES - 1
           prediction = [np.round(i, m) for i in prediction]    # round to N_CLASSES-1 decimal places
           print(prediction)
           prediction = np.array(prediction)
           return lists[max_index], str(prediction[:, n])
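
A hypothetical standalone use of evaluate_one_image, mirroring what OpenPicture does in the UI (the paths are placeholders; the class list is the .txt written by OpenTrainPath):

import json

if __name__ == '__main__':
    with open(r'E:\data\train_set.txt', 'r') as f:
        lists = json.loads(f.read())

    image = Image.open(r'E:\data\sample.png')
    image = np.array(image.resize([128, 128]))

    label, prob = evaluate_one_image(image_array=image, lists=lists,
                                     log_dir=r'E:\data\ck', N_CLASSES=len(lists))
    print(label, prob)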



back.py

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'back.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.
import base64
from background_png import img as background    # import the img variable, aliased as background

...
tmp = open('background.png', 'wb')        # create a temporary file
tmp.write(base64.b64decode(background))    # decode the image and write it into the file
tmp.close()



from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_window(object):
    def setupUi(self, window):
        window.setObjectName("window")
        window.setFixedSize(959, 750)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(window.sizePolicy().hasHeightForWidth())
        window.setSizePolicy(sizePolicy)
        window.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
        window.setStyleSheet("border-image: url(./background.png);\n"
"font: 75 12pt \"微软雅黑\";")
        self.label_xietitle = QtWidgets.QLabel(window)
        self.label_xietitle.setGeometry(QtCore.QRect(200, 30, 550, 151))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_xietitle.sizePolicy().hasHeightForWidth())
        self.label_xietitle.setSizePolicy(sizePolicy)
        self.label_xietitle.setStyleSheet("border-image: url();")
        self.label_xietitle.setObjectName("label_xietitle")
        self.test_one = QtWidgets.QPushButton(window)
        self.test_one.setGeometry(QtCore.QRect(650, 250, 227, 28))
        self.test_one.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.test_one.setStyleSheet("border-image: url()")
        self.test_one.setCheckable(True)
        self.test_one.setAutoExclusive(True)
        self.test_one.setObjectName("test_one")
        self.label5 = QtWidgets.QLabel(window)
        self.label5.setGeometry(QtCore.QRect(590, 590, 341, 61))
        self.label5.setStyleSheet("border-image: url();")
        self.label5.setAlignment(QtCore.Qt.AlignCenter)
        self.label5.setWordWrap(True)
        self.label5.setObjectName("label5")
        self.label7 = QtWidgets.QLabel(window)
        self.label7.setGeometry(QtCore.QRect(310, 500, 291, 71))
        self.label7.setStyleSheet("border-image: url();")
        self.label7.setAlignment(QtCore.Qt.AlignCenter)
        self.label7.setWordWrap(True)
        self.label7.setObjectName("label7")
        self.label3 = QtWidgets.QLabel(window)
        self.label3.setGeometry(QtCore.QRect(640, 310, 256, 256))
        self.label3.setStyleSheet("border-image: url();")
        self.label3.setWordWrap(True)
        self.label3.setObjectName("label3")
        self.load_saver = QtWidgets.QPushButton(window)
        self.load_saver.setGeometry(QtCore.QRect(350, 250, 227, 28))
        self.load_saver.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.load_saver.setStyleSheet("border-image: url();")
        self.load_saver.setCheckable(True)
        self.load_saver.setChecked(False)
        self.load_saver.setAutoExclusive(True)
        self.load_saver.setDefault(False)
        self.load_saver.setFlat(False)
        self.load_saver.setObjectName("load_saver")
        self.load_classes = QtWidgets.QPushButton(window)
        self.load_classes.setGeometry(QtCore.QRect(351, 316, 227, 28))
        self.load_classes.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.load_classes.setStyleSheet("border-image: url();")
        self.load_classes.setCheckable(True)
        self.load_classes.setAutoExclusive(True)
        self.load_classes.setObjectName("load_classes")
        self.load_val = QtWidgets.QPushButton(window)
        self.load_val.setGeometry(QtCore.QRect(351, 381, 227, 28))
        self.load_val.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.load_val.setStyleSheet("border-image: url();")
        self.load_val.setCheckable(True)
        self.load_val.setAutoExclusive(True)
        self.load_val.setObjectName("load_val")
        self.label = QtWidgets.QLabel(window)
        self.label.setGeometry(QtCore.QRect(130, 470, 91, 31))
        self.label.setStyleSheet("border-image: url();")
        self.label.setWordWrap(True)
        self.label.setObjectName("label")
        self.lineEdit = QtWidgets.QLineEdit(window)
        self.lineEdit.setGeometry(QtCore.QRect(91, 526, 171, 24))
        self.lineEdit.setStyleSheet("border-image: url();")
        self.lineEdit.setObjectName("lineEdit")
        self.train_saver = QtWidgets.QPushButton(window)
        self.train_saver.setGeometry(QtCore.QRect(61, 316, 227, 28))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.train_saver.sizePolicy().hasHeightForWidth())
        self.train_saver.setSizePolicy(sizePolicy)
        self.train_saver.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.train_saver.setStyleSheet("border-image: url();")
        self.train_saver.setCheckable(True)
        self.train_saver.setAutoExclusive(True)
        self.train_saver.setObjectName("train_saver")
        self.select_train = QtWidgets.QPushButton(window)
        self.select_train.setGeometry(QtCore.QRect(61, 250, 227, 28))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.select_train.sizePolicy().hasHeightForWidth())
        self.select_train.setSizePolicy(sizePolicy)
        self.select_train.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.select_train.setStyleSheet("border-image: url();\n"
"")
        self.select_train.setCheckable(True)
        self.select_train.setChecked(False)
        self.select_train.setAutoExclusive(True)
        self.select_train.setFlat(False)
        self.select_train.setObjectName("select_train")
        self.start_train = QtWidgets.QPushButton(window)
        self.start_train.setGeometry(QtCore.QRect(61, 381, 227, 28))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.start_train.sizePolicy().hasHeightForWidth())
        self.start_train.setSizePolicy(sizePolicy)
        self.start_train.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.start_train.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.start_train.setStyleSheet("border-image: url();")
        self.start_train.setCheckable(True)
        self.start_train.setAutoExclusive(True)
        self.start_train.setObjectName("start_train")
        self.back = QtWidgets.QPushButton(window)
        self.back.setGeometry(QtCore.QRect(0, -2, 93, 28))
        self.back.setStyleSheet("border-image: url();")
        self.back.setObjectName("back")
        self.test_one.raise_()
        self.label7.raise_()
        self.label5.raise_()
        self.label3.raise_()
        self.label_xietitle.raise_()
        self.back.raise_()


        self.retranslateUi(window)
        QtCore.QMetaObject.connectSlotsByName(window)

    def retranslateUi(self, window):
        _translate = QtCore.QCoreApplication.translate
        window.setWindowTitle(_translate("window", "名字"))
        self.test_one.setText(_translate("window", "识别图像"))
        self.label5.setText(_translate("window", "<html><head/><body><p align=\"center\"><br/></p></body></html>"))
        self.label7.setText(_translate("window", "<html><head/><body><p align=\"center\"><br/></p></body></html>"))
        self.label3.setText(_translate("window", "<html><head/><body><p align=\"center\"><br/></p></body></html>"))
        self.load_saver.setText(_translate("window", "加载训练数据"))
        self.load_classes.setText(_translate("window", "加载分类文本"))
        self.load_val.setText(_translate("window", "加载验证集"))
        self.label.setText(_translate("window", "<html><head/><body><p align=\"center\">训练次数</p></body></html>"))
        self.train_saver.setText(_translate("window", "训练数据保存位置"))
        self.select_train.setText(_translate("window", "选择训练集"))
        self.start_train.setText(_translate("window", "开始训练"))
        self.back.setText(_translate("window", "返回"))



Converting images to .py files

When PyInstaller packages a program it only bundles .py files, so the images needed by the software and its UI must first be converted into .py files, which are then loaded back when the software runs.

picture.py

# -*- coding: utf-8 -*-
# @Time    : 2018/6/6 18:29
# @Author  : Octan3
# @Email   : Octan3@stu.ouc.edu.cn
# @File    : Pic2py.py
# @Software: PyCharm

import base64


def pic2py(picture_name):
    """
    将图像文件转换为py文件
    :param picture_name:
    :return:
    """
    open_pic = open("%s" % picture_name, 'rb')
    b64str = base64.b64encode(open_pic.read())
    open_pic.close()
    # note: b64str must have .decode() applied here
    write_data = 'img = "%s"' % b64str.decode()
    f = open('%s.py' % picture_name.replace('.', '_'), 'w+')
    f.write(write_data)
    f.close()


if __name__ == '__main__':
    pics = ["background.png"]#当前路径下图像名字#
    for i in pics:
        pic2py(i)
    print("done")

PyInstaller first packages the main script, producing a .spec file. By editing that file you can add the names of all the scripts to be bundled in the right places, and also change other settings, such as swapping the icon (.ico), choosing whether to show a console, and renaming the executable.
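
For example, the initial .spec file is produced by running PyInstaller once against the main script; the later build then uses the edited file, as shown at the end of this section:

pyinstaller -F openwindow.py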

openwindow.spec

# -*- mode: python ; coding: utf-8 -*-


block_cipher = None


a = Analysis(['openwindow.py', 'PreWork.py', 'Cnn2.py', 'val.py', 'test.py', 'back.py', 'open.py', 'background_png.py', 'button_png.py', 'title_png.py'],
             pathex=['E:\window'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             hooksconfig={},
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)

exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='名字',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=False,
          icon='E:\window/软件图标.ico',
          disable_windowed_traceback=False,
          target_arch=None,
          codesign_identity=None,
          entitlements_file=None )

Run the following in the corresponding directory (Win+R, cmd, e:, conda activate, cd into the project folder):

pyinstaller -F openwindow.spec

Notes on using Qt Designer

The .ui file name must not contain spaces.
The path format Designer uses differs from the image path format when loading from a .py file (the "back" in the qrc is a virtual folder):
        # self.listView.setStyleSheet("border-image: url(:/back/images/tai.png);")
        self.listView.setStyleSheet("border-image: url(./images/tai.png);")

Creating a .qrc file
1. In PyCharm create a new file, choose the XML format, and write the content as usual:
<RCC>
  <qresource prefix="back">
    <file>images/taili.png</file>
    <file>images/tai.png</file>
    <file>images/button.png</file>
  </qresource>
</RCC>
Then rename it to name.qrc; Qt Designer can then load the image resources listed in the qrc from that path.
2. When loading resources inside Qt Designer, create a new qrc file via the pencil icon (then the blank-page icon), and open and edit it in PyCharm.
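
Alternatively, if the Designer file keeps the :/back/... resource paths, the .qrc file can be compiled into a Python module with pyrcc5 and imported next to the generated UI code (the names here are illustrative):

pyrcc5 name.qrc -o name_rc.py

After adding import name_rc to the window script, url(:/back/images/tai.png) resolves at runtime.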

