[CS231n@Stanford] Assignment1-Q5 (python) Image Features Implementation


features.ipynb
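This post records my solution to the hyperparameter-search cells of features.ipynb: training a linear SVM and a two-layer network on HOG plus HSV color-histogram features extracted from CIFAR-10. The cells below rely on feature matrices X_train_feats, X_val_feats and X_test_feats built in an earlier cell of the notebook. A minimal sketch of that step, assuming the assignment's features.py provides hog_feature, color_histogram_hsv and extract_features (those helpers and the raw arrays X_train, X_val, X_test are not shown in this excerpt):

import numpy as np
from features import hog_feature, color_histogram_hsv, extract_features

num_color_bins = 10  # number of bins in the HSV color histogram
feature_fns = [hog_feature,
               lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)

# Normalize: zero mean and unit variance per feature, statistics from the training set only.
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats = (X_train_feats - mean_feat) / std_feat
X_val_feats = (X_val_feats - mean_feat) / std_feat
X_test_feats = (X_test_feats - mean_feat) / std_feat

# Append a bias dimension so the linear classifier can fold the bias into its weights.
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])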

import numpy as np
from linear_classifier import LinearSVM

learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [1e5, 1e6, 1e7]

results = {}
best_val = -1
best_svm = None

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained classifier in best_svm. You might also want to play         #
# with different numbers of bins in the color histogram. If you are careful    #
# you should be able to get accuracy of near 0.44 on the validation set.       #
################################################################################

iters = 2000  
for lr in learning_rates:  
    for reg in regularization_strengths:  
        svm = LinearSVM()  
        svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg, num_iters=iters)  
          
        y_train_pred = svm.predict(X_train_feats)  
        acc_train = np.mean(y_train == y_train_pred)  
          
        y_val_pred = svm.predict(X_val_feats)  
        acc_val = np.mean(y_val == y_val_pred)  
  
        results[(lr, reg)] = (acc_train, acc_val)  
          
        if best_val < acc_val:  
            best_val = acc_val  
            best_svm = svm  
            
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
                lr, reg, train_accuracy, val_accuracy)
    
print 'best validation accuracy achieved during cross-validation: %f' % best_val

# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print 'test_accuracy', test_accuracy

lr 1.000000e-09 reg 1.000000e+05 train accuracy: 0.108265 val accuracy: 0.123000
lr 1.000000e-09 reg 1.000000e+06 train accuracy: 0.111939 val accuracy: 0.133000
lr 1.000000e-09 reg 1.000000e+07 train accuracy: 0.414204 val accuracy: 0.413000
lr 1.000000e-08 reg 1.000000e+05 train accuracy: 0.110429 val accuracy: 0.112000
lr 1.000000e-08 reg 1.000000e+06 train accuracy: 0.413490 val accuracy: 0.406000
lr 1.000000e-08 reg 1.000000e+07 train accuracy: 0.419245 val accuracy: 0.422000
lr 1.000000e-07 reg 1.000000e+05 train accuracy: 0.414082 val accuracy: 0.418000
lr 1.000000e-07 reg 1.000000e+06 train accuracy: 0.414673 val accuracy: 0.414000
lr 1.000000e-07 reg 1.000000e+07 train accuracy: 0.325633 val accuracy: 0.347000
best validation accuracy achieved during cross-validation: 0.422000
test_accuracy 0.417
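The TODO above also suggests playing with the number of bins in the color histogram. A hedged sketch of that experiment, reusing the helpers assumed in the feature-extraction sketch at the top and the best (lr, reg) pair from the grid above:

# Sketch: re-extract features for a few bin counts and compare validation accuracy.
for num_color_bins in [8, 10, 12]:
    feature_fns = [hog_feature,
                   lambda img, nb=num_color_bins: color_histogram_hsv(img, nbin=nb)]
    Xtr = extract_features(X_train, feature_fns)
    Xva = extract_features(X_val, feature_fns)

    # Same preprocessing as before: zero mean, unit variance, bias dimension.
    mu = np.mean(Xtr, axis=0, keepdims=True)
    sigma = np.std(Xtr, axis=0, keepdims=True)
    Xtr = np.hstack([(Xtr - mu) / sigma, np.ones((Xtr.shape[0], 1))])
    Xva = np.hstack([(Xva - mu) / sigma, np.ones((Xva.shape[0], 1))])

    svm = LinearSVM()
    svm.train(Xtr, y_train, learning_rate=1e-8, reg=1e7, num_iters=iters)
    acc_val = np.mean(y_val == svm.predict(Xva))
    print 'bins %d val accuracy: %f' % (num_color_bins, acc_val)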

from neural_net import TwoLayerNet

input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10

best_net = None

################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################

results = {} 
best_val = -1  
learning_rates = [1e-1, 5e-1, 1]
regularization_strengths = [1e-3, 5e-3, 1e-2]

iters = 2000
for lr in learning_rates:
    for reg in regularization_strengths:

        print lr, reg
        net = TwoLayerNet(input_dim, hidden_dim, num_classes)
        net.train(X_train_feats, y_train, X_val_feats, y_val, 
                  learning_rate=lr, reg=reg, learning_rate_decay=0.95, 
                  num_iters=iters, verbose=False)    
            
        y_train_pred = net.predict(X_train_feats)    
        acc_train = np.mean(y_train == y_train_pred)    
            
        y_val_pred = net.predict(X_val_feats)    
        acc_val = np.mean(y_val == y_val_pred)    
    
        results[(lr, reg)] = (acc_train, acc_val)    
            
        if best_val < acc_val:    
            best_val = acc_val    
            best_net = net   


################################################################################
#                              END OF YOUR CODE                                #
################################################################################

for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
                lr, reg, train_accuracy, val_accuracy)
    
print 'best validation accuracy achieved during cross-validation: %f' % best_val 
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.

test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print 'test_acc:', test_acc

lr 1.000000e-01 reg 1.000000e-03 train accuracy: 0.554041 val accuracy: 0.539000
lr 1.000000e-01 reg 5.000000e-03 train accuracy: 0.544286 val accuracy: 0.529000
lr 1.000000e-01 reg 1.000000e-02 train accuracy: 0.533510 val accuracy: 0.526000
lr 5.000000e-01 reg 1.000000e-03 train accuracy: 0.704449 val accuracy: 0.596000
lr 5.000000e-01 reg 5.000000e-03 train accuracy: 0.625367 val accuracy: 0.569000
lr 5.000000e-01 reg 1.000000e-02 train accuracy: 0.569918 val accuracy: 0.535000
lr 1.000000e+00 reg 1.000000e-03 train accuracy: 0.731633 val accuracy: 0.586000
lr 1.000000e+00 reg 5.000000e-03 train accuracy: 0.616347 val accuracy: 0.575000
lr 1.000000e+00 reg 1.000000e-02 train accuracy: 0.548939 val accuracy: 0.533000
best validation accuracy achieved during cross-validation: 0.596000
test_acc: 0.536
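The coarse grid puts the best setting at lr = 5e-1, reg = 1e-3. A natural follow-up, in the spirit of the TODO above, is a finer random search around that point; the sampling ranges below are my own assumption, not part of the assignment:

# Sketch: log-uniform random search centered on the best coarse-grid setting.
np.random.seed(0)
for _ in range(10):
    lr = 10 ** np.random.uniform(-0.6, 0.0)    # roughly 0.25 .. 1.0
    reg = 10 ** np.random.uniform(-3.5, -2.5)  # roughly 3e-4 .. 3e-3
    net = TwoLayerNet(input_dim, hidden_dim, num_classes)
    net.train(X_train_feats, y_train, X_val_feats, y_val,
              learning_rate=lr, reg=reg, learning_rate_decay=0.95,
              num_iters=iters, verbose=False)
    acc_val = np.mean(y_val == net.predict(X_val_feats))
    print 'lr %e reg %e val accuracy: %f' % (lr, reg, acc_val)
    if acc_val > best_val:
        best_val = acc_val
        best_net = net

If this turns up a better model, re-run the test-set evaluation above with the new best_net.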

