这个练习是要我们编码实现稀疏自编码的神经网络,并将其可视化出来,需要我们编码的有三个部分:sampleIMAGES.m,sparseAutoencoderCost.m,computeNumericalGradient.m,以下是我实现的代码。
sampleIMAGES.m:
% Sample `numpatches` random patchsize-by-patchsize patches from the
% image stack IMAGES and store each one, flattened, as a column of `patches`.
% All random indices are drawn up front (same RNG consumption order as
% drawing per-iteration would not give, so seeded runs stay reproducible):
% row 1 picks an image (1..10), rows 2-3 pick the patch's top-left corner.
picks = zeros(3, numpatches);
picks(1,:) = unidrnd(10, 1, numpatches);
picks(2:3,:) = unidrnd(512 - patchsize + 1, 2, numpatches);
for k = 1:numpatches
    img = picks(1,k);   % which image to sample from
    r0  = picks(2,k);   % top-left row of the patch
    c0  = picks(3,k);   % top-left column of the patch
    window = IMAGES(r0:r0+patchsize-1, c0:c0+patchsize-1, img);
    patches(:,k) = window(:);  % flatten column-major into one column
end
sparseAutoencoderCost.m:
% Cost and gradients for a 3-layer sparse autoencoder:
% squared reconstruction error + L2 weight decay + KL sparsity penalty,
% followed by vectorized backpropagation over all examples at once.
m = size(data, 2);  % number of training examples

% --- Forward pass ---
hidden = sigmoid(W1 * data + repmat(b1, 1, m));   % hidden-layer activations
output = sigmoid(W2 * hidden + repmat(b2, 1, m)); % output-layer activations

% --- Cost ---
residual = output - data;
data_loss = 0.5 * sum(residual(:).^2) / m;                  % mean reconstruction error
reg_loss = 0.5 * lambda * (sum(W1(:).^2) + sum(W2(:).^2));  % weight decay
rho_hat = sum(hidden, 2) / m;                               % mean activation per hidden unit
% KL(sparsityParam || rho_hat) summed over hidden units, scaled by beta.
sparse_loss = beta * sum(sparsityParam * log(sparsityParam ./ rho_hat) ...
    + (1 - sparsityParam) * log((1 - sparsityParam) ./ (1 - rho_hat)));
cost = data_loss + reg_loss + sparse_loss;

% --- Backward pass ---
delta_out = residual .* (output .* (1 - output));           % output-layer delta
% Derivative of the KL penalty w.r.t. each hidden unit's activation.
kl_grad = beta * (-sparsityParam ./ rho_hat + (1 - sparsityParam) ./ (1 - rho_hat));
delta_hid = (W2' * delta_out + repmat(kl_grad, 1, m)) .* (hidden .* (1 - hidden));

W2grad = delta_out * hidden' / m + lambda * W2;
b2grad = sum(delta_out, 2) / m;
W1grad = delta_hid * data' / m + lambda * W1;
b1grad = sum(delta_hid, 2) / m;
% Two-sided numerical gradient: numgrad(i) approximates dJ/dtheta(i).
% Perturb one coordinate at a time with a single reusable vector instead of
% materialising the n-by-n identity matrix (`epsilon * eye(n)` is O(n^2)
% memory — prohibitive for the ~10^4 parameters of this exercise), and
% preallocate numgrad instead of growing it inside the loop.
epsilon = 1e-4;            % perturbation size, same value as before
n = numel(theta);
numgrad = zeros(n, 1);
perturb = zeros(n, 1);
for i = 1:n
    perturb(i) = epsilon;
    numgrad(i) = (J(theta + perturb) - J(theta - perturb)) / (2 * epsilon);
    perturb(i) = 0;        % reset this coordinate for the next iteration
end
输出结果: