神经网络

神经网络
这周有点忙,先贴出之前完成吴恩达作业的代码,笔记后面找时间完善

import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
import time
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
# Load the cat / non-cat dataset via the course helper in lr_utils.
# NOTE(review): shapes assumed from the reshapes below — *_orig appear to be
# (m, num_px, num_px, 3) uint8 images and labels (1, m); confirm in lr_utils.
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Show one training example with its decoded class name as a sanity check.
index=25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") +  "' picture.")
m_train = train_set_x_orig.shape[0]  # number of training examples
m_test = test_set_x_orig.shape[0]  # number of test examples
num_px = train_set_x_orig.shape[1]  # image side length (images are square)
# Flatten each image into one column: result shape is (num_px*num_px*3, m).
train_set_x_flatten = train_set_x_orig.reshape(m_train, -1).T
test_set_x_flatten = test_set_x_orig.reshape(m_test, -1).T
# Scale pixel intensities from [0, 255] to [0, 1].
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e^(-x)).

    Accepts a scalar or a numpy array and returns the same shape.
    """
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_derivatibe(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x)).

    NOTE(review): the name keeps the original's spelling ("derivatibe")
    so any existing callers continue to work.
    """
    value = sigmoid(x)
    return value * (1.0 - value)
def img2vector(image):
    """Flatten a (height, width, channels) image array into a column vector.

    Returns an array of shape (height * width * channels, 1).
    """
    height, width, channels = image.shape
    return image.reshape(height * width * channels, 1)
def normalizeRows(x):
    """Scale every row of x to unit Euclidean (L2) length.

    Rows of all zeros would divide by zero, matching the original behavior.
    """
    row_lengths = np.linalg.norm(x, axis=1, keepdims=True)
    return x / row_lengths
def softmax(x):
    """Row-wise softmax: each row of the result is non-negative and sums to 1.

    Bug fix: the denominator must be the row-wise SUM of the exponentials,
    but the original used np.linalg.norm (the L2 norm), so rows did not
    sum to 1.  Also subtracts the row max before exponentiating — a
    standard shift that leaves the result mathematically unchanged while
    preventing overflow for large inputs.

    Arguments:
    x -- 2-D numpy array of shape (rows, features)

    Returns:
    s -- array of the same shape; softmax applied independently per row
    """
    x_exp = np.exp(x - np.max(x, axis=1, keepdims=True))  # shift for stability
    x_sum = np.sum(x_exp, axis=1, keepdims=True)          # was np.linalg.norm (bug)
    s = x_exp / x_sum
    return s
def L1(yhat, y):
    """Return the L1 loss: the sum of absolute prediction errors |y - yhat|."""
    return np.sum(np.abs(y - yhat))
def L2(yhat, y):
    """Return the L2 loss: the sum of squared prediction errors.

    Bug fix: the original wrote np.sum(np.power(y-yhat), 2) — the
    exponent landed in np.sum's axis argument, so np.power was called
    with a single argument and raised a TypeError.  The exponent now
    goes inside the np.power call.
    """
    loss = np.sum(np.power(y - yhat, 2))
    return loss
def initialize_with_zeros(dim):
    """Create zeroed logistic-regression parameters.

    Arguments:
    dim -- number of features (length of the weight vector)

    Returns:
    (w, b) -- w is a (dim, 1) array of zeros, b is the scalar 0
    """
    w = np.zeros(shape=(dim, 1))
    b = 0
    assert w.shape == (dim, 1)
    assert isinstance(b, (int, float))
    return w, b
def propagate(w, b, X, Y):
    """One forward/backward pass of logistic regression.

    Arguments:
    w -- weights, shape (features, 1)
    b -- bias, scalar
    X -- data, shape (features, m)
    Y -- labels in {0, 1}, shape (1, m)

    Returns:
    grads -- {"dw": gradient w.r.t. w, "db": gradient w.r.t. b}
    cost -- scalar cross-entropy cost over the m examples
    """
    m = X.shape[1]
    # Forward pass: predicted probabilities, shape (1, m).
    A = sigmoid(np.dot(w.T, X) + b)
    cost = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m
    # Backward pass: gradients of the cross-entropy cost.
    residual = A - Y
    dw = np.dot(X, residual.T) / m
    db = np.sum(residual) / m
    assert dw.shape == w.shape
    assert db.dtype == float
    cost = np.squeeze(cost)
    assert cost.shape == ()
    return {"dw": dw, "db": db}, cost
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """Run gradient descent to learn w and b.

    Arguments:
    w, b -- initial parameters
    X, Y -- training data and labels
    num_iterations -- number of gradient-descent steps
    learning_rate -- step size
    print_cost -- if True, print the cost every 100 iterations

    Returns:
    params -- {"w": ..., "b": ...} learned parameters
    grads -- {"dw": ..., "db": ...} gradients from the last step
    costs -- cost recorded every 100 iterations
    """
    costs = []
    for step in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]
        # Gradient-descent update (new arrays; the caller's w is untouched).
        w = w - learning_rate * dw
        b = b - learning_rate * db
        if step % 100 == 0:
            costs.append(cost)  # append adds one element (extend would splice many)
            if print_cost:
                print("Cost after iteration %i:%f" % (step, cost))
    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    return params, grads, costs
def predict(w, b, X):
    """Predict 0/1 labels using learned logistic-regression parameters.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- (1, m) float array of 0/1 predictions for X
    """
    m = X.shape[1]  # number of examples (one per column)
    w = w.reshape(X.shape[0], 1)
    # Probabilities for the positive class, shape (1, m).
    A = sigmoid(np.dot(w.T, X) + b)
    # Threshold at 0.5: strictly greater maps to 1.0, otherwise 0.0.
    Y_prediction = (A > 0.5).astype(float)
    assert Y_prediction.shape == (1, m)
    return Y_prediction
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """Train logistic regression end to end and report accuracies.

    Arguments:
    X_train, Y_train -- training data (features, m_train) and labels (1, m_train)
    X_test, Y_test -- test data and labels, same layout
    num_iterations -- gradient-descent steps
    learning_rate -- step size
    print_cost -- forward to optimize() to print cost every 100 steps

    Returns:
    d -- dict with costs, train/test predictions, learned w and b,
         and the hyperparameters used
    """
    w, b = initialize_with_zeros(X_train.shape[0])
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    w = parameters["w"]
    b = parameters["b"]
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    # Accuracy = 100 - mean absolute error between 0/1 predictions and labels.
    print("train accuracy:{}".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy:{}".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    return {
        "cost": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning rate": learning_rate,
        "num_iterations": num_iterations,
    }
# --- Train the model and inspect one test-set prediction -------------------
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
plt.show()
print(d["Y_prediction_test"][0,index])

# --- Classify an image of your own -----------------------------------------
my_image = "other_test.jpg"
fname = "D:/文件/" + my_image
# Bug fix: scipy.ndimage.imread and scipy.misc.imresize were removed from
# SciPy (1.2 and 1.3 respectively); load and resize with Pillow instead
# (PIL.Image is already imported at the top of this file).
image = np.array(Image.open(fname))
my_image = np.array(Image.open(fname).resize((num_px, num_px)))
# Flatten into one column the same way the training data was prepared.
my_image = my_image.reshape((1, num_px * num_px * 3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
print(my_predicted_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") +  "\" picture.")
print(classes)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值