2021-10-21

Recognizing Cats with Logistic Regression


# Loading the Dataset

# lr_utils.py
import numpy as np
import h5py
def load_dataset():
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels

    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels

    classes = np.array(test_dataset["list_classes"][:]) # the list of classes
    
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes

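Before building the model, it helps to sanity-check what `load_dataset` returns (a quick sketch; the shapes shown assume the standard deeplearning.ai cat/non-cat dataset of 64x64 RGB images, 209 for training and 50 for testing, so adjust them if your files differ):

# Quick inspection of the loaded arrays.
train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes = load_dataset()
print(train_set_x_orig.shape)  # e.g. (209, 64, 64, 3): 209 RGB images of 64x64 pixels
print(train_set_y_orig.shape)  # e.g. (1, 209): labels reshaped into a row vector
print(test_set_x_orig.shape)   # e.g. (50, 64, 64, 3)
print(classes)                 # e.g. [b'non-cat' b'cat']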
# Implementation

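The script below implements plain logistic regression trained with batch gradient descent. Stacking the $m$ training examples as columns of $X$, the forward pass and cross-entropy cost are

$$A = \sigma(w^T X + b), \qquad \sigma(z) = \frac{1}{1+e^{-z}},$$

$$J = -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log a^{(i)} + (1-y^{(i)})\log(1-a^{(i)})\right],$$

and the gradients applied in the update $w := w - \alpha\,dw$, $b := b - \alpha\,db$ (with learning rate $\alpha$) are

$$dw = \frac{1}{m}X(A-Y)^T, \qquad db = \frac{1}{m}\sum_{i=1}^{m}\left(a^{(i)} - y^{(i)}\right).$$

These are exactly the quantities computed in `propagate` and used in `optimize` below.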
# -*- coding:utf-8 -*-
# author:zzm
# datetime:2021/10/21 15:31
import numpy as np
import matplotlib.pyplot as plt
from lr_utils import load_dataset
def sigmoid(x):
    # the logistic function, mapping any real input into (0, 1)
    return 1 / (1 + np.exp(-x))

def initialize_with_zeros(dim):
    # zero initialization is fine for logistic regression (no symmetry to break)
    w = np.zeros(shape=(dim, 1))
    b = 0
    assert (w.shape == (dim, 1))
    assert (isinstance(b, float) or isinstance(b, int))
    return w, b

def propagate(w, b, X, Y):
    m = X.shape[1]
    # forward propagation: activations and cross-entropy cost
    A = sigmoid(np.dot(w.T, X) + b)
    cost = (-1 / m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    # backward propagation: gradients of the cost with respect to w and b
    dw = (1 / m) * np.dot(X, (A - Y).T)
    db = (1 / m) * np.sum(A - Y)
    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())
    grads = {'dw': dw, 'db': db}
    return grads, cost

def optimize(w, b, X, Y, num_iterations, learning_rate):
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads['dw']
        db = grads['db']
        # gradient-descent update
        w = w - learning_rate * dw
        b = b - learning_rate * db
        # record the cost every 100 iterations
        if i % 100 == 0:
            costs.append(cost)
            print('Iteration %d, cost %f' % (i, cost))
    params = {'w': w, 'b': b}
    grads = {'dw': dw, 'db': db}
    return params, grads, costs

def predict(w, b, X):
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    A = sigmoid(np.dot(w.T, X) + b)
    # threshold the probabilities at 0.5 to get 0/1 labels
    for i in range(A.shape[1]):
        Y_prediction[0, i] = 0 if A[0, i] < 0.5 else 1
    assert (Y_prediction.shape == (1, m))
    return Y_prediction

def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.005):
    w, b = initialize_with_zeros(X_train.shape[0])
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate)
    w, b = parameters['w'], parameters['b']
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    # accuracy = 100% minus the mean absolute error between 0/1 predictions and labels
    print('Train accuracy:', format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100), '%')
    print('Test accuracy:', format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100), '%')
    d = {'costs': costs, 'Y_prediction_test': Y_prediction_test,
         'Y_prediction_train': Y_prediction_train, 'w': w, 'b': b,
         'learning_rate': learning_rate, 'num_iterations': num_iterations}
    return d

train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes=load_dataset()
# X_flatten = X.reshape(X.shape[0], -1).T  # X.T is the transpose of X
# Flatten the training images into columns and transpose.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
# Flatten the test images into columns and transpose.
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
# Standardize: pixel values lie in [0, 255], so dividing by 255 rescales them to [0, 1].
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
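As a quick check (assuming the standard 64x64 RGB cat/non-cat images; adjust the numbers if your data differs), each image should flatten into a column of 64 * 64 * 3 = 12288 values:

# Sanity check on the flattened shapes (64x64x3 images are an assumption
# based on the standard cat/non-cat dataset).
assert train_set_x_flatten.shape[0] == 64 * 64 * 3  # 12288 features per image
assert train_set_x_flatten.shape[1] == train_set_x_orig.shape[0]  # one column per image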

d = model(train_set_x, train_set_y_orig, test_set_x, test_set_y_orig, num_iterations=2000, learning_rate=0.0045)
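
Since `model` returns the recorded costs, the learning curve can be plotted to verify that gradient descent is converging (a minimal sketch using the `matplotlib.pyplot` imported above):

# Plot the cost recorded every 100 iterations by optimize().
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title('Learning rate = ' + str(d['learning_rate']))
plt.show()

A smoothly decreasing curve suggests the learning rate of 0.0045 is reasonable; a curve that oscillates or diverges usually means the rate is too high.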