Binary image classification with machine learning (tested and working)

__author__ = 'Administrator'

import tensorflow as tf
import numpy as np
import os
import glob
from sklearn.utils import shuffle
import cv2
import pandas as pd
import matplotlib.pyplot as plt

def load_train(train_path,img_size,classes):
    images=[]
    labels=[]
    img_names=[]
    cls=[]
    print("读取训练图片...")
    for fields in classes:
        index=classes.index(fields)
        print("Now going to read {} files (Index:{})".format(fields,index))
        path=os.path.join(train_path,fields,'*g')
        files=glob.glob(path)
        for fl in files:
            image=cv2.imread(fl)
            if image is None:   # skip files that OpenCV cannot decode
                continue
            image=cv2.resize(image,(img_size,img_size),interpolation=cv2.INTER_LINEAR)
            image=image.astype(np.float32)
            image=np.multiply(image,1.0/255.0)
            images.append(image)
            label=np.zeros(len(classes))
            label[index]=1.0
            labels.append(label)
            flbase=os.path.basename(fl)
            img_names.append(flbase)
            cls.append(fields)
    images=np.array(images)
    labels=np.array(labels)
    img_names=np.array(img_names)
    cls=np.array(cls)
    return images,labels,img_names,cls
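
# load_train expects one sub-directory per class below train_path; the '*g'
# glob matches the common image extensions .jpg, .jpeg and .png. A hypothetical
# layout for the two classes used further down:
#   tr/
#     cashang/  0001.jpg, 0002.jpg, ...
#     huahen/   0001.jpg, 0002.jpg, ...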
class DataSet(object):
    def __init__(self,images,labels,img_names,cls):
        self._num_examples = images.shape[0]
        self._images=images
        self._labels=labels
        self._img_names=img_names
        self._cls=cls
        self._epochs_done=0
        self._index_in_epoch=0
    def images(self):
        return self._images
    def labels(self):
        return self._labels
    def img_names(self):
        return self._img_names
    def cls(self):
        return self._cls
    def num_examples(self):
        return self._num_examples
    def epochs_done(self):
        return self._epochs_done
    def next_batch(self,batch_size):
        # Return the next `batch_size` examples and advance the cursor; when the
        # data is exhausted, count one finished epoch and restart from the front.
        start=self._index_in_epoch
        self._index_in_epoch+=batch_size

        if self._index_in_epoch>self._num_examples:
            self._epochs_done+=1
            start=0
            self._index_in_epoch=batch_size
        assert batch_size<=self._num_examples
        end=self._index_in_epoch
        return self._images[start:end],self._labels[start:end],self._img_names[start:end],self._cls[start:end]
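
# A tiny sanity check of the wrap-around logic above (toy arrays, not part of
# the training pipeline): with 5 examples and batch_size=2, the third call to
# next_batch overruns the epoch, so the cursor resets and epochs_done increments.
_demo = DataSet(np.zeros((5, 2)), np.zeros((5, 2)),
                np.array(['a'] * 5), np.array(['c'] * 5))
for _ in range(3):
    _demo.next_batch(2)
assert _demo.epochs_done() == 1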
def read_train_sets(train_path,image_size,classes,validation_size):
    class DataSets(object):    # simple container class
        pass
    data_sets=DataSets()
    # load_train reads the images of each class, one-hot encodes the labels,
    # resizes every image, converts it to np.float32 and normalizes by /255
    images,labels,img_names,cls=load_train(train_path,image_size,classes)
    images,labels,img_names,cls=shuffle(images,labels,img_names,cls)     # shuffle the image order
    if isinstance(validation_size, float):
        # a float is interpreted as a fraction of the whole data set;
        # images.shape is e.g. [1000,64,64,3], so shape[0] is the image count
        validation_size = int(validation_size * images.shape[0])

    validation_images = images[:validation_size]        # first validation_size images,
    validation_labels = labels[:validation_size]        # their labels,
    validation_img_names = img_names[:validation_size]  # file names,
    validation_cls = cls[:validation_size]              # and class names

    train_images = images[validation_size:]  # everything else is the training set
    train_labels = labels[validation_size:]
    train_img_names = img_names[validation_size:]
    train_cls = cls[validation_size:]

    data_sets.train = DataSet(train_images, train_labels, train_img_names, train_cls)
    data_sets.valid = DataSet(validation_images, validation_labels, validation_img_names, validation_cls)
    return data_sets        # data set construction finished
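
# For example, with 1000 loaded images and validation_size=0.2, the split above
# yields int(0.2 * 1000) = 200 validation examples and 800 training examples.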

def create_convolution_layer(input,
                 num_input_channels,
                 conv_filter_size,
                 num_filters):
    weights = create_weights(shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
    biases = create_biases(num_filters)
    # convolution (stride 1, 'SAME' padding) + bias + ReLU + 2x2 max-pooling
    layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
    layer += biases
    layer = tf.nn.relu(layer)
    layer = tf.nn.max_pool(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # (visualization of the activations could be added here)
    return layer
def create_flatten_layer(layer):
    layer_shape=layer.get_shape()
    num_features=layer_shape[1:4].num_elements()
    layer=tf.reshape(layer,[-1,num_features])
    return layer
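
# A quick shape check for the 128x128 input used below: each convolution keeps
# the spatial size ('SAME' padding, stride 1) and each 2x2 max-pool halves it,
# so three conv+pool blocks give 128 -> 64 -> 32 -> 16. With 64 filters in the
# last block, the flatten layer therefore produces 16 * 16 * 64 = 16384 features.
assert 128 // 2 // 2 // 2 == 16 and 16 * 16 * 64 == 16384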
def create_fc_layer(input,
                    num_inputs,
                    num_outputs,
                    use_relu=True):
    weights=create_weights(shape=[num_inputs,num_outputs])
    biases=create_biases(num_outputs)

    layer=tf.matmul(input,weights)+biases

    if use_relu:
        # hidden layer: ReLU activation followed by dropout; the output layer
        # (use_relu=False) gets neither, so the logits are left untouched
        layer=tf.nn.relu(layer)
        layer=tf.nn.dropout(layer,keep_prob=0.7)
    return layer
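
# Note: keep_prob=0.7 above is also active at validation time. A common
# alternative (a sketch, not wired into this script) is a placeholder that
# defaults to 1.0, so dropout can be switched on only for training batches:
#   keep_prob = tf.placeholder_with_default(1.0, shape=[])
#   layer = tf.nn.dropout(layer, keep_prob=keep_prob)
# and then feed {keep_prob: 0.7} in feed_dict_tr only.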
def show_progress(epoch,feed_dict_train,feed_dict_validate,val_loss,i):
    acc=session.run(accuracy,feed_dict=feed_dict_train)
    val_acc=session.run(accuracy,feed_dict=feed_dict_validate)
    print("epoch: {}, i: {}, acc: {:.4f}, val_acc: {:.4f}, val_loss: {:.4f}".format(
          epoch+1, i, acc, val_acc, val_loss))


import random
random.seed(10)   # fix the random seed so runs are reproducible: anyone using
                  # the same seed (10 is arbitrary) gets the same random numbers
from tensorflow import set_random_seed   # TensorFlow's graph-level seed, analogous to the above
set_random_seed(20)
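
# Note: sklearn.utils.shuffle used in read_train_sets draws from numpy's RNG,
# which neither random.seed nor set_random_seed touches; for a fully
# reproducible shuffle one would also set, e.g.:
#   np.random.seed(10)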


batch_size=32  # images per iteration
classes=['cashang','huahen']    # the two class names
num_classes=len(classes)   # number of classes = size of the output layer

validation_size=0.2   # hold out 20% of the data for validation
img_size=128          # images are resized to 128*128; the network needs a fixed
                      # (usually square) input size, the source images may differ
num_channels=3     # color images with three RGB channels
train_path=r'C:\Users\Administrator\Desktop\tr'   # data directory
data=read_train_sets(train_path,img_size,classes,validation_size)   # read and split the images (see read_train_sets above)
print("Complete reading input data. Will now print a snippet of it")
#print("Number of files in Training-set:\t\t{}".format(len(data.train.labels)))

session=tf.Session()
# input placeholders; None leaves the batch size unspecified so any batch works
x=tf.placeholder(tf.float32,shape=[None, img_size, img_size, num_channels],name='x')
y_true=tf.placeholder(tf.float32,shape=[None,num_classes],name='y_true')
y_true_cls=tf.argmax(y_true,1)   # tf.argmax(input,axis): index of the row-wise (axis=1) or column-wise (axis=0) maximum
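
# For example, tf.argmax applied to the one-hot batch [[0,1],[1,0]] with axis=1
# yields the class indices [1, 0].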
# convolutional layers; these hyper-parameters are tunable
filter_size_conv1=3
num_filters_conv1=32

filter_size_conv2=3
num_filters_conv2=32

filter_size_conv3=3
num_filters_conv3=64

# output size of the first fully connected layer
fc_layer_size=1024
def create_weights(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.05))

def create_biases(size):
    return tf.Variable(tf.constant(0.05, shape=[size]))

layer_conv1=create_convolution_layer(input=x,
                                     num_input_channels=num_channels,
                                     conv_filter_size=filter_size_conv1,
                                     num_filters=num_filters_conv1)
layer_conv2=create_convolution_layer(input=layer_conv1,
                                     num_input_channels=num_filters_conv1,
                                     conv_filter_size=filter_size_conv2,
                                     num_filters=num_filters_conv2)
layer_conv3=create_convolution_layer(input=layer_conv2,
                                     num_input_channels=num_filters_conv2,
                                     conv_filter_size=filter_size_conv3,
                                     num_filters=num_filters_conv3)
layer_flat=create_flatten_layer(layer_conv3)

layer_fc1=create_fc_layer(input=layer_flat,
                          num_inputs=layer_flat.get_shape()[1:4].num_elements(),
                          num_outputs=fc_layer_size,
                          use_relu=True)
layer_fc2=create_fc_layer(input=layer_fc1,
                          num_inputs=fc_layer_size,
                          num_outputs=num_classes,
                          use_relu=False)

y_pred=tf.nn.softmax(layer_fc2,name='y_pred')
y_pred_cls=tf.argmax(y_pred,axis=1)


cross_entropy=tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,labels=y_true)
cost=tf.reduce_mean(cross_entropy)
# newly added: log the loss
tf.summary.scalar("cost function", cost)  # expose the loss as a scalar summary for TensorBoard
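
# The scalar summary above is created but never written out. A minimal sketch
# for actually logging it to TensorBoard (assuming a ./logs directory; not
# wired into train() below):
#   merged_summary = tf.summary.merge_all()
#   writer = tf.summary.FileWriter('./logs', session.graph)
#   # inside the training loop:
#   # writer.add_summary(session.run(merged_summary, feed_dict=feed_dict_tr), i)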

optimizer=tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

saver = tf.train.Saver(max_to_keep=2)

correct_prediction=tf.equal(y_pred_cls,y_true_cls)
accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
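
# For example, if y_pred_cls is [1,0,1,1] and y_true_cls is [1,1,1,1], then
# correct_prediction is [True,False,True,True] and accuracy is 3/4 = 0.75.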


def train(num_iteration):
    global total_iterations
    for i in range(total_iterations,total_iterations+num_iteration):
        x_batch,y_true_batch,_,cls_batch=data.train.next_batch(batch_size)
        x_valid_batch,y_valid_batch,_,valid_cls_batch=data.valid.next_batch(batch_size)

        feed_dict_tr={x:x_batch,y_true:y_true_batch}
        feed_dict_val={x:x_valid_batch,y_true:y_valid_batch}
        session.run(optimizer,feed_dict=feed_dict_tr)
        examples=data.train.num_examples()
        if i % int(examples/batch_size)==0:   # roughly once per epoch
            val_loss=session.run(cost,feed_dict=feed_dict_val)   # loss on the validation batch
            epoch=int(i/int(examples/batch_size))

            show_progress(epoch,feed_dict_tr,feed_dict_val,val_loss,i)

            saver.save(session,'./cashang-huahen-model/cashang-huahen.ckpt',global_step=i)
    total_iterations+=num_iteration
    print("final loss:", session.run(cost, feed_dict=feed_dict_tr))
session.run(tf.global_variables_initializer())
total_iterations=0
#saver=tf.train.Saver()   # save the model (already created above)
train(8000)
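
# To reuse the trained model later, restore the latest checkpoint saved above
# (a sketch for a separate inference script, not executed here):
#   ckpt = tf.train.latest_checkpoint('./cashang-huahen-model/')
#   saver = tf.train.import_meta_graph(ckpt + '.meta')
#   saver.restore(session, ckpt)
#   graph = tf.get_default_graph()
#   x = graph.get_tensor_by_name('x:0')
#   y_pred = graph.get_tensor_by_name('y_pred:0')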
