1. sampleIMAGES
% Sample `numpatches` random square patches (patchsize x patchsize) from the
% image stack IMAGES and store each one, flattened, as a column of `patches`.
%
% BUG FIX: the original used round(rand(1,1)*k + 1), which samples the two
% boundary values with only half the probability of interior values; randi
% draws uniformly.  Also generalized: image count and image dimensions are
% read from IMAGES instead of being hard-coded as 10 and 512.
[imgH, imgW, numImages] = size(IMAGES);
for i = 1:numpatches
    % Pick a source image and the patch's top-left corner uniformly at random.
    imgNum = randi(numImages);
    row = randi(imgH - patchsize + 1);
    col = randi(imgW - patchsize + 1);
    patch = IMAGES(row:row+patchsize-1, col:col+patchsize-1, imgNum);
    % Flatten (column-major) into a patchsize^2-by-1 vector.
    patches(:,i) = reshape(patch, patchsize*patchsize, 1);
end
2. sparseAutoencoderCost
% Forward pass: hidden-layer and output-layer activations.
hidden = sigmoid(bsxfun(@plus, W1 * data, b1));
output = sigmoid(bsxfun(@plus, W2 * hidden, b2));
% Cost = mean squared reconstruction error + L2 weight decay + KL sparsity.
diff = output - data;
mse_term = 0.5 * sum(diff(:).^2) / m;
reg_term = 0.5 * lambda * (sum(W1(:).^2) + sum(W2(:).^2));
rhoHat = mean(hidden, 2);   % average activation of each hidden unit
kl_term = beta * sum(sparsityParam * log(sparsityParam ./ rhoHat) ...
    + (1 - sparsityParam) * log((1 - sparsityParam) ./ (1 - rhoHat)));
cost = mse_term + reg_term + kl_term;
% Backpropagation.
delta_out = diff .* output .* (1 - output);
b2grad = mean(delta_out, 2);
W2grad = delta_out * hidden' ./ m + lambda * W2;
% Derivative of the KL sparsity penalty w.r.t. each hidden unit's activation.
kl_grad = beta * ((1 - sparsityParam) ./ (1 - rhoHat) - sparsityParam ./ rhoHat);
delta_hid = bsxfun(@plus, W2' * delta_out, kl_grad) .* hidden .* (1 - hidden);
b1grad = mean(delta_hid, 2);
W1grad = delta_hid * data' ./ m + lambda * W1;
3. computeNumericalGradient
% Numerically estimate the gradient of J at theta via central differences:
%   numgrad(i) ~ (J(theta + eps*e_i) - J(theta - eps*e_i)) / (2*eps)
EPSILON = 0.0001;
% BUG FIX: was `for i=1:size(theta)` — size() returns a vector and the colon
% operator silently uses only its first element; that happens to work for a
% column vector but breaks for a row vector.  numel() is always correct.
% (Also removed the per-iteration disp(i) debug print.)
for i = 1:numel(theta)
    l = theta;
    r = theta;
    l(i) = l(i) - EPSILON;
    r(i) = r(i) + EPSILON;
    numgrad(i) = (J(r) - J(l)) / (2*EPSILON);
end
4. result