cs231n: Implementing svm_loss on CIFAR-10

Overview

Dataset – CIFAR-10

60,000 color images – each 32×32, divided into 10 classes with 6,000 images per class.
50,000 images – used for training – organized as 5 training batches of 10,000 images each;
10,000 images – used for testing – forming a single batch.

Download link: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
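If you prefer to fetch the archive from a script, here is a minimal sketch using only the Python standard library (the extracted folder name cifar-10-batches-py matches the path used later in this post):

import urllib.request
import tarfile

url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
urllib.request.urlretrieve(url, 'cifar-10-python.tar.gz')     # download the ~163 MB archive
with tarfile.open('cifar-10-python.tar.gz', 'r:gz') as tar:
    tar.extractall('.')                                       # creates cifar-10-batches-py/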

Loss function: SVM

Steps

For a given set of parameters, for each incorrect class (the formula after this list states these steps precisely):

  1. Subtract the correct class's score from the incorrect class's score
  2. Add 1 (the margin) to the difference
  3. Take the maximum of the result and 0
  4. Sum the results over all incorrect classes
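In symbols, with $s_j$ the score of class $j$ and $y_i$ the correct label of example $i$, these steps compute the multiclass SVM (hinge) loss for one example:

$$L_i = \sum_{j \neq y_i} \max(0,\; s_j - s_{y_i} + 1)$$

The total loss averages $L_i$ over the training examples; a regularization term is added later in the code.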

Code walkthrough

· Loading the CIFAR-10 data

import numpy as np
import os
import pickle

def load_CIFAR_batch(filename):
    with open(filename, 'rb') as f:                          # open one batch file
        datadict = pickle.load(f, encoding='iso-8859-1')     # load the pickled dict
        X = datadict['data']
        Y = datadict['labels']
        X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")   # images as (N, H, W, C)
        Y = np.array(Y)
        return X, Y

def load_CIFAR10(ROOT):
    xs = []
    ys = []
    for b in range(1, 6):                                    # the 5 training batches
        f = os.path.join(ROOT, 'data_batch_%d' % (b,))
        X, Y = load_CIFAR_batch(f)
        xs.append(X)
        ys.append(Y)
    Xtr = np.concatenate(xs)
    Ytr = np.concatenate(ys)
    del X, Y
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
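Besides the data batches, the extracted directory also contains a batches.meta file with the human-readable class names. A small helper for loading them (a convenience sketch, not part of the original post; it reuses the os and pickle imports above):

def load_label_names(ROOT):
    # batches.meta is a pickled dict whose 'label_names' entry
    # lists the 10 class names in label order
    with open(os.path.join(ROOT, 'batches.meta'), 'rb') as f:
        meta = pickle.load(f, encoding='iso-8859-1')
    return meta['label_names']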

· Inspecting the data shapes

import random
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

cifar10_dir = 'D://ML//cifar-10-batches-py//'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
print('training data shape:', X_train.shape)
print('training labels shape:', y_train.shape)
print('test data shape:', X_test.shape)
print('test labels shape:', y_test.shape)

Output

training data shape: (50000, 32, 32, 3)
training labels shape: (50000,)
test data shape: (10000, 32, 32, 3)
test labels shape: (10000,)

· Displaying sample images

classes = ['plane', 'car', 'bird']        # show only the first 3 of the 10 classes
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
    idxs = np.flatnonzero(y_train == y)   # indices of all images with label y
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        plt_idx = i * num_classes + y + 1  # grid position: row i, column y
        plt.subplot(samples_per_class, num_classes, plt_idx)
        plt.imshow(X_train[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()

Output

[Figure: seven random training images from each displayed class]

· Splitting into training, validation, development, and test sets

num_training = 49000
num_test = 1000
num_dev = 500
num_validation = 1000

# Validation set: the num_validation samples after the first num_training
mask = range(num_training, num_training + num_validation)   # split by index range
X_val = X_train[mask]                                       # fancy indexing: array[sequence]
y_val = y_train[mask]

# Training set: the first num_training samples of the original training set
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]

# Development set: a small random subset drawn from the training set
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]

# Test set: keep only the first num_test test samples
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]


print("train data shape:",X_train.shape)
print("train labels shape:",y_train.shape)
print("validation data shape:",X_val.shape)
print("Validation labels shape:",y_val.shape)
print("test data shape:",X_test.shape)
print("test labels shape:",y_test.shape)

Output

train data shape: (49000, 32, 32, 3)
train labels shape: (49000,)
validation data shape: (1000, 32, 32, 3)
validation labels shape: (1000,)
test data shape: (1000, 32, 32, 3)
test labels shape: (1000,)

· Flattening the images into 2-D arrays

X_train = np.reshape(X_train, (X_train.shape[0], -1))   # flatten each 32x32x3 image into a 3072-vector
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))

print("train data shape:",X_train.shape)
print("validation data shape:",X_val.shape)
print("test data shape:",X_test.shape)

Output

train data shape: (49000, 3072)
validation data shape: (1000, 3072)
test data shape: (1000, 3072)

· Computing the mean image

mean_image = np.mean(X_train, axis=0)    # per-pixel mean over the training set
print(mean_image.shape)
print(mean_image[:10])
plt.figure(figsize=(4, 4))
plt.imshow(mean_image.reshape((32, 32, 3)).astype('uint8'))
plt.show()

Output

(3072,)
[130.64189796 135.98173469 132.47391837 130.05569388 135.34804082
131.75402041 130.96055102 136.14328571 132.47636735 131.48467347]
[Figure: the mean image rendered as a 32×32 RGB image]

· Mean centering

# subtract the training-set mean from each split (the mean comes from the
# training data only, so no statistics leak from the held-out sets)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image

· Appending a bias dimension to X

Using np.hstack, append a column of ones to each split so the bias can be folded into the weight matrix:

X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])

print(X_train.shape,X_val.shape,X_test.shape)

Output

(49000, 3073) (1000, 3073) (1000, 3073)
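The appended column of ones is the usual bias trick: the score function $s = Wx + b$ is rewritten so the bias vector becomes one extra row of the weight matrix,

$$s = Wx + b = \begin{bmatrix} W & b \end{bmatrix} \begin{bmatrix} x \\ 1 \end{bmatrix},$$

which is why the weight matrix below has shape (3073, 10) rather than (3072, 10).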

·SVM_loss

# SVM loss and gradient, computed with explicit loops (naive version)
def SVM_loss_naive(w, dw, X_train, y_train):
    dw = np.zeros(w.shape)                  # reset the gradient accumulator on every call
    SVM_loss = 0
    for i in range(X_train.shape[0]):
        y = np.dot(X_train[i], w)           # class scores for sample i
        for element in range(10):
            if element != y_train[i]:
                margin = np.maximum(0, y[element] - y[y_train[i]] + 1)
                SVM_loss += margin                        # hinge loss term
                if margin != 0:
                    dw[:, element] += X_train[i, :].T     # gradient w.r.t. the wrong class
                    dw[:, y_train[i]] -= X_train[i, :].T  # gradient w.r.t. the correct class
    SVM_loss = SVM_loss / X_train.shape[0]
    dw = dw / X_train.shape[0]
    SVM_loss = SVM_loss + 0.00001 * np.sum(w * w)         # L2 regularization
    dw = dw + 0.00001 * w
    return SVM_loss, dw
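The double loop above is easy to follow but slow. For reference, here is a vectorized sketch of the same loss and gradient (my rewrite, not the original post's code; it keeps the author's 0.00001 regularization strength and the same reg*w gradient convention):

def SVM_loss_vectorized(w, X, y, reg=0.00001):
    num_train = X.shape[0]
    scores = X.dot(w)                                    # (N, 10) class scores
    correct = scores[np.arange(num_train), y][:, None]   # (N, 1) correct-class scores
    margins = np.maximum(0, scores - correct + 1)        # hinge margins
    margins[np.arange(num_train), y] = 0                 # the correct class contributes nothing
    loss = margins.sum() / num_train + reg * np.sum(w * w)

    # each positive margin adds X[i] to its class column and
    # subtracts X[i] from the correct-class column
    binary = (margins > 0).astype(float)
    binary[np.arange(num_train), y] = -binary.sum(axis=1)
    dw = X.T.dot(binary) / num_train + reg * w           # same reg gradient as the naive version
    return loss, dw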

· SGD with momentum

def train(dw, X_train, y_train, w, lr):
    min_loss = 1000000                  # best loss seen so far
    v = 0
    while True:
        v = 0.9 * v - lr * dw           # momentum update (dw is held fixed in this loop)
        w = w + v
        # w = -dw * lr + w              # plain SGD alternative
        loss = 0                        # recompute the batch loss from scratch each step
        for i in range(X_train.shape[0]):
            y = np.dot(X_train[i], w)
            for element in range(10):
                if element != y_train[i]:
                    loss += np.maximum(0, y[element] - y[y_train[i]] + 1)
        if min_loss > loss:
            min_loss = loss
        else:
            break                       # stop as soon as the loss stops decreasing
    ans_right = []
    ans_wrong = []
    for i in range(X_train.shape[0]):
        y = np.dot(X_train[i], w)
        y_pred = np.argmax(y)           # predicted class = highest score
        if y_pred == y_train[i]:
            ans_right.append(y[y_pred])
        else:
            ans_wrong.append(y[y_pred])
    return len(ans_right), len(ans_wrong), w
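Before trusting the analytic gradient, a numeric gradient check is standard practice. Here is a minimal sketch (my addition, not part of the original post) that compares the gradient from SVM_loss_naive against centered finite differences at a few random coordinates:

def grad_check_sparse(f, w, analytic_grad, num_checks=5, h=1e-5):
    # compare df/dw at randomly chosen coordinates against
    # the centered finite difference (f(w+h) - f(w-h)) / 2h
    for _ in range(num_checks):
        ix = tuple(np.random.randint(d) for d in w.shape)
        old = w[ix]
        w[ix] = old + h
        fxph = f(w)          # f(w + h)
        w[ix] = old - h
        fxmh = f(w)          # f(w - h)
        w[ix] = old
        grad_numeric = (fxph - fxmh) / (2 * h)
        grad_analytic = analytic_grad[ix]
        rel_error = abs(grad_numeric - grad_analytic) / (abs(grad_numeric) + abs(grad_analytic) + 1e-12)
        print(ix, 'numeric: %f analytic: %f relative error: %e' % (grad_numeric, grad_analytic, rel_error))

# example usage on the small dev split:
# loss, grad = SVM_loss_naive(w, np.zeros(w.shape), X_dev, y_dev)
# f = lambda w: SVM_loss_naive(w, np.zeros(w.shape), X_dev, y_dev)[0]
# grad_check_sparse(f, w, grad)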

· Sampling mini-batches

w = np.random.randn(3073, 10) * 0.00001    # initialize the weights
dw = np.zeros(w.shape)                     # initialize the gradient
k = 0
loss_history = []
for i in range(49):
    k += 1000
    batch_inx = np.random.choice(49000, 1000, replace=False)   # sample a random mini-batch
    X_batch = X_train[batch_inx, :]
    y_batch = y_train[batch_inx]

    SVM_loss, dw = SVM_loss_naive(w, dw, X_batch, y_batch)
    loss_history.append(SVM_loss)
    # print(loss_history)
    right, wrong, w = train(dw, X_batch, y_batch, w, 0.0000001)
    print('iteration', k, '/49000:loss', SVM_loss * 10)
    print('accuracy:', right / (right + wrong))

Output

iteration 1000 /49000:loss 89.5845249861141
accuracy: 0.273
iteration 2000 /49000:loss 71.10205479511572
accuracy: 0.266
iteration 3000 /49000:loss 65.87297888265323
accuracy: 0.304
iteration 4000 /49000:loss 60.33047435372527
accuracy: 0.346
iteration 5000 /49000:loss 62.27605108705603
accuracy: 0.316
iteration 6000 /49000:loss 58.79108111114377
accuracy: 0.32
iteration 7000 /49000:loss 56.934177456645216
accuracy: 0.343
iteration 8000 /49000:loss 56.71183937502347
accuracy: 0.338
iteration 9000 /49000:loss 54.885774583423114
accuracy: 0.329
iteration 10000 /49000:loss 53.528802976797905
accuracy: 0.35
iteration 11000 /49000:loss 52.53138673478002
accuracy: 0.359
iteration 12000 /49000:loss 54.12935319123772
accuracy: 0.352
iteration 13000 /49000:loss 51.404638932014535
accuracy: 0.377
iteration 14000 /49000:loss 52.22847974916101
accuracy: 0.349
iteration 15000 /49000:loss 54.25548656781271
accuracy: 0.34
iteration 16000 /49000:loss 51.704645129043726
accuracy: 0.369
iteration 17000 /49000:loss 53.64640222551984
accuracy: 0.355
iteration 18000 /49000:loss 51.124412280193425
accuracy: 0.367
iteration 19000 /49000:loss 52.88764236091077
accuracy: 0.344
iteration 20000 /49000:loss 51.819746910253805
accuracy: 0.361
iteration 21000 /49000:loss 50.80893362897753
accuracy: 0.359
iteration 22000 /49000:loss 49.71865722440956
accuracy: 0.368
iteration 23000 /49000:loss 49.60862316212231
accuracy: 0.387
iteration 24000 /49000:loss 50.272404322186865
accuracy: 0.365
iteration 25000 /49000:loss 48.76374228052603
accuracy: 0.39
iteration 26000 /49000:loss 48.431778596392846
accuracy: 0.403
iteration 27000 /49000:loss 50.84532179202387
accuracy: 0.344
iteration 28000 /49000:loss 49.748673852757875
accuracy: 0.385
iteration 29000 /49000:loss 51.68108050072852
accuracy: 0.341
iteration 30000 /49000:loss 51.18185600465439
accuracy: 0.377
iteration 31000 /49000:loss 51.41396202388631
accuracy: 0.33
iteration 32000 /49000:loss 47.176875566444664
accuracy: 0.392
iteration 33000 /49000:loss 48.327116960102344
accuracy: 0.397
iteration 34000 /49000:loss 48.01184779177274
accuracy: 0.379
iteration 35000 /49000:loss 49.91704344022998
accuracy: 0.387
iteration 36000 /49000:loss 50.45015739121784
accuracy: 0.366
iteration 37000 /49000:loss 49.46066529737113
accuracy: 0.383
iteration 38000 /49000:loss 47.50687770559271
accuracy: 0.372
iteration 39000 /49000:loss 48.3037726148915
accuracy: 0.408
iteration 40000 /49000:loss 49.41335010886645
accuracy: 0.369
iteration 41000 /49000:loss 46.11809323024411
accuracy: 0.386
iteration 42000 /49000:loss 46.66459364643305
accuracy: 0.397
iteration 43000 /49000:loss 48.79361627186944
accuracy: 0.377
iteration 44000 /49000:loss 49.218461436351404
accuracy: 0.385
iteration 45000 /49000:loss 50.82582205906479
accuracy: 0.357
iteration 46000 /49000:loss 50.2309602702847
accuracy: 0.379
iteration 47000 /49000:loss 48.71128767887046
accuracy: 0.386
iteration 48000 /49000:loss 51.15533146660502
accuracy: 0.368
iteration 49000 /49000:loss 52.29126166126106
accuracy: 0.362

· Plotting the loss curve

#pic=pd.Series(loss_history)
#print(loss_history)
plt.plot(loss_history)
plt.xlabel('iteration number')
plt.ylabel('loss')
plt.show()

Output

[Figure: training loss plotted against iteration number]
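The test split prepared earlier is never used during training. A minimal sketch (my addition) for scoring the final weights on it:

y_test_pred = np.argmax(X_test.dot(w), axis=1)   # predicted class per test image
print('test accuracy:', np.mean(y_test_pred == y_test))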
