Hello everyone,
I want to fine-tune VGG-16 on my own dataset. It has 32 classes, but I ran into the following error:
train: epoch 01: 1/ 62:148 im2= imdb.images.data(:,:,:,batch);
Error using dagnn.DagNN/eval (line 83)
No variable of name 'label' could be found in the DAG.
Error in cnn_train_dag>processEpoch (line 253)
net.eval(inputs, params.derOutputs, 'holdOn', s < params.numSubBatches) ;
Error in cnn_train_dag (line 105)
[net, state] = processEpoch(net, state, params, 'train') ;
Error in fine_test0 (line 113)
info = cnn_train_dag(net, imdb, @(i,b) getBatch(bopts,i,b), opts.train, 'val', find(imdb.images.set == 3)) ;
And this is my code:
% fine-tune vgg16 for a custom 32-class dataset
function [net, info] = vgg_train(imdb, expDir)
% Adapted from the MatConvNet CIFAR-10 DAG demo
% run(fullfile(fileparts(mfilename('fullpath')), '../../', 'matlab', 'vl_setupnn.m')) ;
run matlab/vl_setupnn
% imdb=load('imdbs.mat', 'imdb')
% imdb=load('imdb_cifar10.mat')
load('imdbs.mat', 'imdb')
% some common options
opts.train.batchSize = 100;
opts.train.numEpochs = 20 ;
opts.train.continue = true ;
opts.train.gpus = [];%[1] ;
opts.train.learningRate = [1e-1*ones(1, 10), 1e-2*ones(1, 5)];
opts.train.weightDecay = 3e-4;
opts.train.momentum = 0.;
% opts.train.expDir = expDir;
opts.train.numSubBatches = 1;
% getBatch options
bopts.useGpu = numel(opts.train.gpus) > 0 ; % Usually keep at 0, seems to only work with 3D data.
% Note: the paste is missing the DAG construction preamble; reconstructed here
% to match the pattern of the layers below (conv1_1 is the standard VGG-16
% [3 3 3 64] block producing the 'conv1' variable that relu1_1 consumes):
net = dagnn.DagNN() ;
net.addLayer('conv1_1', dagnn.Conv('size', [3 3 3 64], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'input'}, {'conv1'}, {'conv1_1f' 'conv1_1b'});
net.addLayer('relu1_1', dagnn.ReLU(), {'conv1'}, {'relu1'}, {});
net.addLayer('conv1_2', dagnn.Conv('size', [3 3 64 64], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'relu1'}, {'conv2'}, {'conv1_2f' 'conv1_2b'});
net.addLayer('relu1_2', dagnn.ReLU(), {'conv2'}, {'relu2'}, {});
%net.addLayer('lrn1', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu1'}, {'lrn1'}, {});
net.addLayer('pool1', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu2'}, {'pool1'}, {});
net.addLayer('conv2_1', dagnn.Conv('size', [3 3 64 128], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool1'}, {'conv3'}, {'conv2_1f' 'conv2_1b'});
net.addLayer('relu2_1', dagnn.ReLU(), {'conv3'}, {'relu3'}, {});
%layer8
net.addLayer('conv2_2', dagnn.Conv('size', [3 3 128 128], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu3'}, {'conv4'}, {'conv2_2f' 'conv2_2b'});
net.addLayer('relu2_2', dagnn.ReLU(), {'conv4'}, {'relu4'}, {});
%net.addLayer('lrn2', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu2'}, {'lrn2'}, {});
%change padding in layer10
net.addLayer('pool2', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu4'}, {'pool2'}, {});
net.addLayer('conv3_1', dagnn.Conv('size', [3 3 128 256], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool2'}, {'conv5'}, {'conv3_1f' 'conv3_1b'});
net.addLayer('relu3_1', dagnn.ReLU(), {'conv5'}, {'relu5'}, {});
net.addLayer('conv3_2', dagnn.Conv('size', [3 3 256 256], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'relu5'}, {'conv6'}, {'conv3_2f' 'conv3_2b'});
net.addLayer('relu3_2', dagnn.ReLU(), {'conv6'}, {'relu6'}, {});
%layer 15 ok
net.addLayer('conv3_3', dagnn.Conv('size', [3 3 256 256], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu6'}, {'conv7'}, {'conv3_3f' 'conv3_3b'});
net.addLayer('relu3_3', dagnn.ReLU(), {'conv7'}, {'relu7'}, {});
%net.addLayer('lrn3', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu2'}, {'lrn2'}, {});
% change padding in layer 17
net.addLayer('pool3', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu7'}, {'pool3'}, {});
net.addLayer('conv4_1', dagnn.Conv('size', [3 3 256 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool3'}, {'conv8'}, {'conv4_1f' 'conv4_1b'});
net.addLayer('relu4_1', dagnn.ReLU(), {'conv8'}, {'relu8'}, {});
net.addLayer('conv4_2', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu8'}, {'conv9'}, {'conv4_2f' 'conv4_2b'});
net.addLayer('relu4_2', dagnn.ReLU(), {'conv9'}, {'relu9'}, {});
net.addLayer('conv4_3', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu9'}, {'conv10'}, {'conv4_3f' 'conv4_3b'});
net.addLayer('relu4_3', dagnn.ReLU(), {'conv10'}, {'relu10'}, {});
%net.addLayer('lrn4', dagnn.LRN('param', [5 1 0.0001/5 0.75]), {'relu2'}, {'lrn2'}, {});
%layer24 change in padding
net.addLayer('pool4', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]),{'relu10'}, {'pool4'}, {});
net.addLayer('conv5_1', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]),{'pool4'}, {'conv11'}, {'conv5_1f' 'conv5_1b'});
net.addLayer('relu5_1', dagnn.ReLU(), {'conv11'}, {'relu11'}, {});
net.addLayer('conv5_2', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu11'}, {'conv12'}, {'conv5_2f' 'conv5_2b'});
net.addLayer('relu5_2', dagnn.ReLU(), {'conv12'}, {'relu12'}, {});
%29 layer
net.addLayer('conv5_3', dagnn.Conv('size', [3 3 512 512], 'hasBias', true, 'stride', [1, 1], 'pad', [1 1 1 1]), {'relu12'}, {'conv13'}, {'conv5_3f' 'conv5_3b'});
net.addLayer('relu5_3', dagnn.ReLU(), {'conv13'}, {'relu13'}, {});
net.addLayer('pool5', dagnn.Pooling('method', 'max', 'poolSize', [2, 2], 'stride', [2 2], 'pad', [0 0 0 0]), {'relu13'}, {'pool5'}, {});
net.addLayer('conv6', dagnn.Conv('size', [7 7 512 4096], 'hasBias', true, 'stride', [1, 1], 'pad', [0 0 0 0]), {'pool5'}, {'conv14'}, {'conv6f' 'conv6b'});
net.addLayer('relu6', dagnn.ReLU(), {'conv14'}, {'relu14'}, {});
net.addLayer('drop6', dagnn.DropOut('rate', 0.5), {'relu14'}, {'drop6'}, {});
net.addLayer('conv7', dagnn.Conv('size', [1 1 4096 4096], 'hasBias', true, 'stride', [1, 1], 'pad', [0 0 0 0]), {'drop6'}, {'conv15'}, {'conv7f' 'conv7b'});
net.addLayer('relu7', dagnn.ReLU(), {'conv15'}, {'relu15'}, {});
net.addLayer('drop7', dagnn.DropOut('rate', 0.5), {'relu15'}, {'drop7'}, {});
%%% change the number of output classes to 32
net.addLayer('classifier', dagnn.Conv('size', [1 1 4096 32], 'hasBias', true, 'stride', [1, 1], 'pad', [0 0 0 0]), {'drop7'}, {'classifier'}, {'conv8f' 'conv8b'});
net.addLayer('prob', dagnn.SoftMax(), {'classifier'}, {'prob'}, {});
% net.addLayer('objective', dagnn.Loss('loss', 'log'), {'prob', 'label'}, {'objective'}, {});
% net.addLayer('error', dagnn.Loss('loss', 'classerror'), {'prob','label'}, 'error') ;
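% Note: the "No variable of name 'label'" error above is consistent with these
% two loss layers being commented out: getBatch passes a 'label' input, but no
% layer in the DAG consumes it, so dagnn.DagNN/eval cannot bind the variable.
% Re-enabling them (cnn_train_dag's default derOutputs is {'objective', 1})
% should resolve it:
net.addLayer('objective', dagnn.Loss('loss', 'log'), {'prob', 'label'}, {'objective'}, {});
net.addLayer('error', dagnn.Loss('loss', 'classerror'), {'prob', 'label'}, {'error'}, {});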
%
%
% initialization of the weights (CRITICAL!!!!)
% if(numel(varargin) > 0)
% initNet_FineTuning(net, netPre);
% else
% initNet_He(net);
% end
% train
initNet(net, 1/100);
end
function initNet(net, f)
net.initParams();
%
end
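% Side note: initNet above takes a scale f but ignores it; net.initParams()
% applies MatConvNet's built-in defaults. If a scaled initialization is
% actually wanted, a minimal sketch could look like this (hypothetical helper,
% not part of the original post):
function initNetScaled(net, f)
% Initialize with MatConvNet's defaults, then rescale every parameter by f.
net.initParams() ;
for i = 1:numel(net.params)
  net.params(i).value = f * net.params(i).value ;
end
end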
% getBatch for IMDBs that are too big to be in RAM
function inputs = getBatch(opts, imdb, batch)
labels = imdb.images.labels(1,batch) ;
im2 = imdb.images.data(:,:,:,batch) ;
im2 = im2(1:224,1:224,:,:) ;                 % crop to the 224x224 VGG input size
s = size(im2) ;
images = zeros(s(1),s(2),3,s(4),'single') ;  % replicate the single channel to RGB
images(:,:,1,:) = im2 ;
images(:,:,2,:) = im2 ;
images(:,:,3,:) = im2 ;
if opts.useGpu > 0
  images = gpuArray(images) ;
end
inputs = {'input', images, 'label', labels} ;
end
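A quick way to confirm the diagnosis from the MATLAB prompt (getVarIndex is part of the dagnn.DagNN API and, as far as I can tell, returns NaN for names that are not DAG variables, which is exactly the condition eval rejects at line 83):

net.getVarIndex('label')   % NaN until a layer consuming 'label' is added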