Training fruit360 with TensorFlow (2)

After the data preprocessing is complete, we start training the model. To save some effort, a slim-style model is used here:

import tensorflow as tf
import tensorflow.contrib.slim as slim

def vgg_model(inputs,
              num_classes=classes,
              is_training=True,
              dropout_keep_prob=0.5,
              spatial_squeeze=True,
              scope='vgg_16',
              fc_conv_padding='VALID',
              global_pool=False):
  with tf.variable_scope(scope, 'vgg_models', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')   # 50x50
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')   # 25x25
      net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')   # 13x13
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')   # 7x7
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')   # 4x4
      # Fully connected layers implemented as convolutions.
      net = slim.conv2d(net, 2048, [3, 3], padding=fc_conv_padding, scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 2048, [1, 1], scope='fc7')
      # Convert end_points_collection into an end_points dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if global_pool:
        net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
        end_points['global_pool'] = net
      if num_classes:
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          scope='fc8')
        if spatial_squeeze:
          net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
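The training code below feeds placeholders named x, y_, keep1, keep2 and train, plus a logits tensor, none of which appear in the snippet above. A minimal sketch of how they might be wired up is shown here; the 100x100x3 input size is inferred from the pooling-size comments (50/25/13/7/4), and passing the dropout/training placeholders into vgg_model is an assumption about the author's full code, not something shown in the original post:

# Sketch only: placeholder names match the feed_dict keys used in the
# training loop below; shapes are assumptions (fruit images of 100x100x3).
x = tf.placeholder(tf.float32, [None, 100, 100, 3], name='x')   # image batch
y_ = tf.placeholder(tf.float32, [None, classes], name='y_')     # one-hot labels
train = tf.placeholder(tf.bool, name='train')                   # is_training switch
keep1 = tf.placeholder(tf.float32, name='keep1')                # dropout keep prob (fc6)
keep2 = tf.placeholder(tf.float32, name='keep2')                # dropout keep prob (fc7)

# In the author's full code keep1/keep2 presumably drive the two dropout
# layers separately; here only one keep probability is passed through.
logits, end_points = vgg_model(x, num_classes=classes,
                               is_training=train,
                               dropout_keep_prob=keep1)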

Define the model's loss, optimizer (learning rate) and accuracy:

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1)), tf.float32))

Start training:

saver = tf.train.Saver()
index = 0

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    base = []  # tracks the best test accuracy seen after step 2000
    while not coord.should_stop():
        train_batch, train_label = sess.run([train_x, train_y])
        train_label = data.ChangeOneHot(train_label)
        _, loss = sess.run([train_step, cross_entropy],
                           feed_dict={x: train_batch, y_: train_label,
                                      keep1: 0.5, keep2: 0.5, train: True})
        # Evaluate on a test batch every 200 steps.
        if index % 200 == 0:
            test_batch, test_label = sess.run([test_x, test_y])
            test_label = data.ChangeOneHot(test_label)
            acc = accuracy.eval({x: test_batch, y_: test_label,
                                 keep1: 1.0, keep2: 1.0, train: True})
            print(get_now_time(), 'step:', index, "acc:", acc, "  loss:", loss)
            getlogs(get_now_time() + ' step :' + str(index) + ' acc:' + str(acc)
                    + ' loss: ' + str(loss))
            if acc > 0.95 and index > 1000:
                saver.save(sess, save_model + '/train.ckpt', global_step=index)
            if index == 2000:
                base.append(acc)
                saver.save(sess, save_model + '/train1.ckpt', global_step=index)
            if index > 2000 and base:
                # Peek at the best accuracy so far; only save when it improves.
                before = base[-1]
                if acc > before:
                    base.append(acc)
                    saver.save(sess, save_model + '/train1.ckpt', global_step=index)
        index += 1
        if index == 50000:
            saver.save(sess, save_model + '/train.ckpt', global_step=index)
            break
    coord.request_stop()
    coord.join(threads=threads)
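For reference, the train_x/train_y and test_x/test_y batch tensors and the data.ChangeOneHot helper come from the preprocessing step mentioned at the top of this post. A rough sketch of what a queue-based TFRecord pipeline and the one-hot helper could look like is below; the file names, feature keys and batch size are assumptions for illustration, not the author's actual preprocessing code:

import numpy as np

def ChangeOneHot(labels, num_classes=classes):
    # In the post this helper lives in the data module; it turns an integer
    # label batch, e.g. [0, 2, 1], into one-hot vectors.
    one_hot = np.zeros((len(labels), num_classes), dtype=np.float32)
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot

def read_tfrecord_batch(filename, batch_size=64):
    # Read images/labels from a TFRecord file and batch them with a queue;
    # this is why start_queue_runners is needed in the session above.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(serialized, features={
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64)})
    image = tf.decode_raw(features['image'], tf.uint8)
    image = tf.reshape(image, [100, 100, 3])
    image = tf.cast(image, tf.float32) / 255.0
    label = tf.cast(features['label'], tf.int32)
    return tf.train.shuffle_batch([image, label], batch_size=batch_size,
                                  capacity=2000, min_after_dequeue=1000)

train_x, train_y = read_tfrecord_batch('train.tfrecords')
test_x, test_y = read_tfrecord_batch('test.tfrecords')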

A screenshot of the training output:

You can run even more iterations here; I tested with 30,000 iterations and the results were very good:

Testing is as follows:

Test a few kinds of fruit:
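The original post shows the per-fruit results as screenshots. As a rough illustration only, restoring the saved checkpoint and classifying a single image could look like the sketch below; the image path, the PIL-based loading and the 100x100 input size are assumptions, and it reuses the x/keep1/keep2/train placeholders, logits and saver built above:

# Sketch only: restore the latest checkpoint and predict one image.
import numpy as np
from PIL import Image

def predict_image(image_path, checkpoint_dir):
    # Load and normalize the image the same way as the training data.
    img = Image.open(image_path).resize((100, 100))
    img = np.asarray(img, dtype=np.float32) / 255.0
    img = img[np.newaxis, ...]                       # add batch dimension
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
        pred = sess.run(tf.argmax(logits, 1),
                        feed_dict={x: img, keep1: 1.0, keep2: 1.0, train: False})
        return pred[0]

print(predict_image('apple.jpg', save_model))   # 'apple.jpg' is a placeholder path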
