ex1
computeCost.m
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost of a particular choice of theta
% You should set J to the cost.
%
% Vectorized squared-error cost for linear regression:
%   J = (1/(2m)) * sum((X*theta - y).^2)
% X is m-by-(n+1), theta is (n+1)-by-1, y is m-by-1.
% (Semicolon suppresses console output; the old version printed the
% intermediate sum on every call.)
J = sum((X * theta - y) .^ 2) / (2 * m);
% =========================================================================
gradientDescent.m
% ====================== YOUR CODE HERE ======================
% Instructions: Perform a single gradient step on the parameter vector
% theta.
%
% Hint: While debugging, it can be useful to print out the values
% of the cost function (computeCost) and gradient here.
%
% Simultaneous update of ALL parameters (vectorized):
%   theta := theta - (alpha/m) * X' * (X*theta - y)
% (The old per-iteration fprintf calls lacked '\n' and hard-coded
% exactly two parameters, so they are removed.)
theta = theta - (alpha / m) * (X' * (X * theta - y));

% Save the cost J in every iteration
J_history(iter) = computeCost(X, y, theta);
% ============================================================
featureNormalize.m
% ====================== YOUR CODE HERE ======================
% Instructions: First, for each feature dimension, compute the mean
%               of the feature and subtract it from the dataset,
%               storing the mean value in mu. Next, compute the
%               standard deviation of each feature and divide
%               each feature by its standard deviation, storing
%               the standard deviation in sigma.
%
%               Note that X is a matrix where each column is a
%               feature and each row is an example. You need
%               to perform the normalization separately for
%               each feature.
%
% Hint: You might find the 'mean' and 'std' functions useful.
%
mu = mean(X);                 % 1-by-n row vector of per-feature means
sigma = std(X);               % 1-by-n row vector of per-feature std devs
% Implicit expansion broadcasts the row vectors over each row of X
% (requires MATLAB R2016b+ or Octave). Semicolons added: the old
% version printed mu and the whole X_norm matrix to the console.
X_norm = (X - mu) ./ sigma;
% ===================================
computeCostMulti.m
% You need to return the following variables correctly
J = 0;
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost of a particular choice of theta
% You should set J to the cost.
%
% Same vectorized form as the single-variable case — it already
% handles any number of features:
%   J = (1/(2m)) * sum((X*theta - y).^2)
% (Semicolon suppresses output; the debug fprintf is removed.)
J = sum((X * theta - y) .^ 2) / (2 * m);
% =========================================================================
gradientDescentMulti.m
% ====================== YOUR CODE HERE ======================
% Instructions: Perform a single gradient step on the parameter vector
% theta.
%
% Hint: While debugging, it can be useful to print out the values
% of the cost function (computeCostMulti) and gradient here.
%
% Vectorized simultaneous update — works for any number of features:
%   theta := theta - (alpha/m) * X' * (X*theta - y)
theta = theta - (alpha / m) * (X' * (X * theta - y));

% Save the cost J in every iteration.
% FIX: this must call computeCostMulti (per the hint above); the old
% code called computeCost. The old fprintf also hard-coded exactly two
% theta components, which is wrong for the multivariate case.
J_history(iter) = computeCostMulti(X, y, theta);
% ============================================================
normalEqn.m
theta=pinv(X'*X)*X'*y
ex2
sigmoid.m
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the sigmoid of each value of z (z can be a matrix,
% vector or scalar).
%
% g(z) = 1 / (1 + exp(-z)), applied elementwise.
% FIX: use exp(-z) instead of e.^(-z) — the constant 'e' exists only
% in Octave; plain MATLAB errors with "Undefined function or variable 'e'".
g = 1 ./ (1 + exp(-z));
% =================================
costFunction.m
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost of a particular choice of theta.
% You should set J to the cost.
% Compute the partial derivatives and set grad to the partial
% derivatives of the cost w.r.t. each parameter in theta
%
% Note: grad should have the same dimensions as theta
%
h = sigmoid(X * theta);                               % hypothesis, m-by-1

% Cross-entropy cost, written as inner products instead of an
% elementwise sum: sum(a .* b) == a' * b for column vectors.
J = (-(y' * log(h)) - (1 - y)' * log(1 - h)) / m;

% Gradient of J w.r.t. theta — same (n+1)-by-1 shape as theta.
grad = (X' * (h - y)) / m;
% =================================
predict.m
% ====================== YOUR CODE HERE ======================
% Instructions: Complete the following code to make predictions using
% your learned logistic regression parameters.
% You should set p to a vector of 0's and 1's
%
% Predicted probabilities, then threshold at 0.5: probability >= 0.5
% maps to class 1, otherwise class 0. double() converts the logical
% result back to a numeric 0/1 vector, matching the original output.
h = 1 ./ (1 + exp(-(X * theta)));
p = double(h >= 0.5);
% ======================================
此部分原代码如上。但是在思考(百度搜索)的过程中,发现了更简单的方式。@蜗牛专注学习 用一行代码代替了我的两行代码:
%我的代码
%p(find(p>=0.5))=1;
%p(find(p<0.5))=0;
%蜗牛专注学习的代码
p=p>0.5
%注意:p>0.5 与我原来的 p>=0.5 仅在 p 恰好等于 0.5 时结果不同;另外这一行末尾缺少分号,会把整个 p 向量打印到命令行。
costFunctionReg.m
% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost of a particular choice of theta.
% You should set J to the cost.
% Compute the partial derivatives and set grad to the partial
% derivatives of the cost w.r.t. each parameter in theta
h = sigmoid(X * theta);      % hypothesis, m-by-1 column vector

% Unregularized cross-entropy cost
J = (1 / m) * sum(-y .* log(h) - (1 - y) .* log(1 - h));
% Add the regularization term. The bias term theta(1) is NOT
% regularized, so the sum starts at index 2.
% FIX: use theta(2:end) — the old code wrote theta(2:size(theta)),
% which feeds a vector to the colon operator and only works because
% MATLAB silently uses its first element.
J = J + (lambda / (2 * m)) * sum(theta(2:end) .^ 2);

% Gradient: regularize every component except the bias term.
% (The old code also indexed h(:,1), a redundant index on a column
% vector, and assembled grad piecewise from a temp vector.)
reg = (lambda / m) * theta;
reg(1) = 0;                                   % exclude bias from penalty
grad = (1 / m) * (X' * (h - y)) + reg;
% ====================================
以上是我这两次作业情况。总体不算太难。加油哦!