added: Ex4 tests #5

Open · wants to merge 1 commit into base: master

20 changes: 20 additions & 0 deletions ex4/test_ex4.m
@@ -0,0 +1,20 @@
%!test test_sanity()

% test nnCostFunction, cost w/o regularization
%!test test_nnCostFunction_costNoReg()

% test nnCostFunction, cost with regularization
%!test test_nnCostFunction_costReg()

% test gradient for sigmoid
%!test test_sigmoidGradient()

% test nnCostFunction, gradient (backprop) w/o regularization
%!test test_nnCostFunction_gradNoReg()

%!test test_nnCostFunction_gradCheck(0)

% test nnCostFunction, gradient (backprop) with regularization
%!test test_nnCostFunction_gradReg()

%!test test_nnCostFunction_gradCheck(3)
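
Note: the %!test blocks above are picked up by Octave's built-in test harness. A minimal sketch of how one might run the suite from the Octave prompt, assuming the ex4 solution files (nnCostFunction.m, sigmoidGradient.m, and friends) are on the path:

% run from the ex4 directory so the exercise functions and the test files resolve
cd ex4
test test_ex4           % terse pass/fail summary
test test_ex4 verbose   % echo each %!test block as it runs
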
33 changes: 33 additions & 0 deletions ex4/test_nnCostFunction_costNoReg.m
@@ -0,0 +1,33 @@
function test_nnCostFunction_costNoReg()
epsilon = 1e-4;

% input descriptions
input_layer_size = 400; % 20x20 Input Images of Digits
hidden_layer_size = 25; % 25 hidden units
num_labels = 10; % 10 labels, from 1 to 10
m = 5000; % number of examples in training set

Theta1 = rand(hidden_layer_size, input_layer_size+1);
Theta2 = rand(num_labels, hidden_layer_size+1);
nn_params = [Theta1(:) ; Theta2(:)]; % Unroll parameters
X = rand(m, input_layer_size); % training set features
y = mod(1:m, num_labels)'; % training set results, labels {1 ... 10}
y(y == 0) = 10;

% Weight regularization parameter (we set this to 0 here).
lambda = 0;

[J grad] = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lambda);
% J is a real scalar (the cost)
% grad is an unrolled column vector of the partial derivatives of the neural network
% grad size is (hidden_layer_size*(input_layer_size+1) + num_labels*(hidden_layer_size+1), 1)
%~ assert(J, 1);  % placeholder, left disabled: random weights give no known expected cost
%~ assert(grad, rand(hidden_layer_size*(input_layer_size+1) + num_labels*(hidden_layer_size+1), 1));  % placeholder, left disabled

% tests against known values from the course forum
% https://class.coursera.org/ml-005/forum/thread?thread_id=1783
% Assignment #1, NN cost function w/o regularization
[J] = nnCostFunction(sec(1:1:32)', 2, 4, 4, reshape(tan(1:32), 16, 2) / 5, 1 + mod(1:16,4)', 0);
assert(J, 10.931, epsilon);

endfunction
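
Note: the commented-out asserts above only document the shape of the outputs. For the dimensions used in this test, the length of the unrolled gradient works out as below (a worked check under the same sizes, not part of the PR itself):

% unrolled gradient length = numel(Theta1) + numel(Theta2)
input_layer_size = 400; hidden_layer_size = 25; num_labels = 10;
len1 = hidden_layer_size * (input_layer_size + 1);  % 25 * 401 = 10025
len2 = num_labels * (hidden_layer_size + 1);        % 10 * 26  = 260
expected_len = len1 + len2;                         % 10285
% so grad from the nnCostFunction call above should be a 10285x1 column vector
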
9 changes: 9 additions & 0 deletions ex4/test_nnCostFunction_costReg.m
@@ -0,0 +1,9 @@
function test_nnCostFunction_costReg()
epsilon = 1e-4;

% https://class.coursera.org/ml-005/forum/thread?thread_id=1783
% Assignment #2, NN cost function with regularization
[J] = nnCostFunction(sec(1:1:32)', 2, 4, 4, reshape(tan(1:32), 16, 2) / 5, 1 + mod(1:16,4)', 0.1);
assert(J, 170.9933, epsilon);

endfunction
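
Note: the only change from the unregularized cost test is lambda = 0.1. For reference, the penalty that lambda adds is the standard one from the exercise; a sketch is below, with illustrative names that are not part of this PR (Theta1/Theta2 are the reshaped weight matrices, m the number of examples, and the bias column is excluded):

% sketch of the regularization penalty added to the unregularized cost
function reg = regPenaltySketch(Theta1, Theta2, lambda, m)
  reg = (lambda / (2*m)) * (sum(sum(Theta1(:, 2:end).^2)) + sum(sum(Theta2(:, 2:end).^2)));
endfunction
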
41 changes: 41 additions & 0 deletions ex4/test_nnCostFunction_gradCheck.m
@@ -0,0 +1,41 @@
function test_nnCostFunction_gradCheck(lambda)
% This test borrows code from the ML class Ex4 checkNNGradients.m
% and uses the Ex4 helper modules:
%   computeNumericalGradient.m
%   debugInitializeWeights.m

input_layer_size = 3;
hidden_layer_size = 5;
num_labels = 3;
m = 5;

% We generate some 'random' test data
Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size); % 5x4
Theta2 = debugInitializeWeights(num_labels, hidden_layer_size); % 3x6

% Reusing debugInitializeWeights to generate X
X = debugInitializeWeights(m, input_layer_size - 1);
y = 1 + mod(1:m, num_labels)';

% Unroll parameters
nn_params = [Theta1(:) ; Theta2(:)];

% Shorthand for the cost function
costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, num_labels, X, y, lambda);

[cost, grad] = costFunc(nn_params);
numgrad = computeNumericalGradient(costFunc, nn_params);
if lambda == 0
assert(cost, 2.1010, 1e-4);
elseif lambda == 3
assert(cost, 2.1464, 1e-4);
end
assert(size(grad), size(numgrad));

% Evaluate the norm of the relative difference between the two gradients.
% With a correct implementation, and assuming EPSILON = 0.0001 was used
% in computeNumericalGradient.m, diff below should be less than 1e-9
diff = norm(numgrad-grad)/norm(numgrad+grad);
assert(diff, 2.3e-11, 1e-9);

endfunction
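
Note: computeNumericalGradient.m (from the course materials) approximates each partial derivative with a central difference; a self-contained sketch of that idea is below, assuming EPSILON = 1e-4 as in the comment above (the course file may differ in details):

% central-difference approximation of the gradient of costFunc at theta
function numgrad = numericalGradientSketch(costFunc, theta)
  e = 1e-4;
  numgrad = zeros(size(theta));
  perturb = zeros(size(theta));
  for i = 1:numel(theta)
    perturb(i) = e;
    numgrad(i) = (costFunc(theta + perturb) - costFunc(theta - perturb)) / (2 * e);
    perturb(i) = 0;
  end
endfunction
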
41 changes: 41 additions & 0 deletions ex4/test_nnCostFunction_gradNoReg.m
@@ -0,0 +1,41 @@
function test_nnCostFunction_gradNoReg()
epsilon = 1e-4;

% https://class.coursera.org/ml-005/forum/thread?thread_id=1783
% Assignment #4, NN cost function, backprop, gradient w/o regularization
[J grad] = nnCostFunction(sec(1:1:32)', 2, 4, 4, reshape(tan(1:32), 16, 2) / 5, 1 + mod(1:16,4)', 0);
a = [ 3.0518e-001
7.1044e-002
5.1307e-002
6.2115e-001
-7.4310e-002
5.2173e-002
-2.9711e-003
-5.5435e-002
-9.5647e-003
-4.6995e-002
1.0499e-004
9.0452e-003
-7.4506e-002
7.4997e-001
-1.7991e-002
4.4328e-001
-5.9840e-002
5.3455e-001
-7.8995e-002
3.5278e-001
-5.3284e-003
8.4440e-002
-3.4384e-002
6.6441e-002
-3.4314e-002
3.3322e-001
-7.0455e-002
1.5063e-001
-1.7708e-002
2.7170e-001
7.1129e-002
1.4488e-001];
assert(grad, a, epsilon);

endfunction
41 changes: 41 additions & 0 deletions ex4/test_nnCostFunction_gradReg.m
@@ -0,0 +1,41 @@
function test_nnCostFunction_gradReg()
epsilon = 1e-4;

% https://class.coursera.org/ml-005/forum/thread?thread_id=1783
% Assignment #5, NN cost function, backprop, gradient with regularization
[J grad] = nnCostFunction(sec(1:1:32)', 2, 4, 4, reshape(tan(1:32), 16, 2) / 5, 1 + mod(1:16,4)', 0.1);
a = [ 0.3051843
0.0710438
0.0513066
0.6211486
-0.0522766
0.0586827
0.0053191
-0.0983900
-0.0164243
-0.0544438
1.4123116
0.0164517
-0.0745060
0.7499671
-0.0179905
0.4432801
-0.0825542
0.5440175
-0.0726739
0.3680935
-0.0167392
0.0781902
-0.0461142
0.0811755
-0.0280090
0.3428785
-0.0918487
0.1441408
-0.0260627
0.3122174
0.0779614
0.1523740];
assert(grad, a, epsilon);

endfunction
4 changes: 4 additions & 0 deletions ex4/test_sanity.m
@@ -0,0 +1,4 @@
function test_sanity ()
% make sure tests are running correctly
assert(1,1);
endfunction
44 changes: 44 additions & 0 deletions ex4/test_sigmoidGradient.m
@@ -0,0 +1,44 @@
function test_sigmoidGradient()
epsilon = 1e-4;

x = sigmoidGradient(0);
a = 0.25000;
assert(x, a, epsilon);

x = sigmoidGradient(1);
a = 0.19661;
assert(x, a, epsilon);

x = sigmoidGradient(5);
a = 0.0066481;
assert(x, a, epsilon);

x = sigmoidGradient([0 1 2; 3 4 5]);
a = [ 0.2500000 0.1966119 0.1049936
0.0451767 0.0176627 0.0066481 ];
assert(x, a, epsilon);

x = sigmoidGradient([0 1 2]);
a = [0.25000 0.19661 0.10499];
assert(x, a, epsilon);

x = sigmoidGradient([0; 1; 2]);
a = [ 0.25000
0.19661
0.10499];
assert(x, a, epsilon);

x = sigmoidGradient([-6 -7]);
a = [2.4665e-03 9.1022e-04];
assert(x, a, epsilon);

% https://class.coursera.org/ml-005/forum/thread?thread_id=1783
x = sigmoidGradient(sec(1:1:5)');
a = [ 0.117342
0.076065
0.195692
0.146323
0.027782];
assert(x, a, epsilon);

endfunction
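
Note: the expected values above all follow from the identity g'(z) = g(z) .* (1 - g(z)) for the sigmoid g(z) = 1 ./ (1 + exp(-z)); a one-line reference sketch (not the student's sigmoidGradient.m):

% element-wise sigmoid gradient; sigmoidGradientSketch(0) returns 0.25
g = @(z) 1 ./ (1 + exp(-z));
sigmoidGradientSketch = @(z) g(z) .* (1 - g(z));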