computeCost.m
function J = computeCost(X, y, theta)
% COMPUTECOST Compute the cost for linear regression.
%   J = COMPUTECOST(X, y, theta) returns the halved mean squared error of
%   the linear hypothesis X*theta against the targets y:
%       J = (1/(2m)) * sum((X*theta - y).^2)
%
%   Inputs:
%     X     - m-by-n design matrix (one training example per row)
%     y     - m-by-1 vector of targets
%     theta - n-by-1 parameter vector
%
%   Output:
%     J     - scalar cost value

m = length(y);                     % number of training examples
errors = X * theta - y;            % m-by-1 residuals
J = (errors' * errors) / (2 * m);  % sum of squares via inner product
end
gradientDescent.m
function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
% GRADIENTDESCENT Perform batch gradient descent to learn theta.
%   [theta, J_history] = GRADIENTDESCENT(X, y, theta, alpha, num_iters)
%   takes num_iters gradient steps with learning rate alpha, minimizing
%   the linear-regression cost over (X, y).
%
%   Inputs:
%     X         - m-by-n design matrix (one training example per row)
%     y         - m-by-1 vector of targets
%     theta     - n-by-1 initial parameter vector
%     alpha     - scalar learning rate
%     num_iters - number of gradient steps to take
%
%   Outputs:
%     theta     - learned n-by-1 parameter vector
%     J_history - num_iters-by-1 vector of the cost after each step

m = length(y);                  % number of training examples
J_history = zeros(num_iters, 1);

for iter = 1:num_iters
    % Vectorized simultaneous update of all parameters: the per-component
    % update theta(j) -= (alpha/m) * sum((X*theta - y) .* X(:,j)) is
    % exactly X' * residual for every j at once.
    residual = X * theta - y;   % m-by-1 prediction errors
    theta = theta - (alpha / m) * (X' * residual);

    % Record the cost with the updated parameters (requires computeCost.m).
    J_history(iter) = computeCost(X, y, theta);
end
end