Commit

Merge pull request opencv#1179 from arrybn:statistics
vpisarev committed May 24, 2017
2 parents 9ace633 + a5d0ef5 commit a4cc801
Showing 16 changed files with 412 additions and 0 deletions.
46 changes: 46 additions & 0 deletions modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -135,6 +135,8 @@ namespace dnn //! This namespace is used for dnn module functionality.
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const;
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const {(void)inputs; (void)outputs; return 0;}

CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
CV_PROP String type; //!< Type name which was used for creating layer by layer factory.
@@ -323,6 +325,50 @@ namespace dnn //! This namespace is used for dnn module functionality.
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
/** @brief Computes FLOP for the whole loaded model with the specified input shapes.
* @param netInputShapes vector of shapes for all net inputs.
* @returns computed FLOP.
*/
CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const std::vector<MatShape>& netInputShapes) const;
/** @overload */
CV_WRAP int64 getFLOPS(const int layerId,
const MatShape& netInputShape) const;

/** @brief Returns the list of types for layers used in the model.
* @param layersTypes output parameter for returning types.
*/
CV_WRAP void getLayerTypes(std::vector<String>& layersTypes) const;

/** @brief Returns the count of layers of the specified type.
* @param layerType type to count.
* @returns count of layers of the given type.
*/
CV_WRAP int getLayersCount(const String& layerType) const;

/** @brief Computes the number of bytes required to store
* all weights and intermediate blobs for the model.
* @param netInputShapes vector of shapes for all net inputs.
* @param weights output parameter to store resulting bytes for weights.
* @param blobs output parameter to store resulting bytes for intermediate blobs.
*/
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const;
/** @overload */
CV_WRAP void getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
size_t& weights, size_t& blobs) const;
private:

struct Impl;
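A minimal usage sketch of the new statistics API, assuming net is an already-loaded cv::dnn::Net, the umbrella header <opencv2/dnn.hpp>, and a hypothetical 1x3x224x224 input (MatShape is a std::vector<int>):

// Sketch only: query FLOPS, memory consumption and layer types for a loaded net.
// The 1x3x224x224 input shape is an assumption; substitute the model's real input size.
#include <opencv2/dnn.hpp>
#include <iostream>

void printNetStatistics(const cv::dnn::Net& net)
{
    int dims[] = {1, 3, 224, 224};
    cv::dnn::MatShape inputShape(dims, dims + 4);

    std::cout << "FLOPS: " << net.getFLOPS(inputShape) << std::endl;

    size_t weights = 0, blobs = 0;
    net.getMemoryConsumption(inputShape, weights, blobs);
    std::cout << "weights: " << weights << " bytes, blobs: " << blobs << " bytes" << std::endl;

    std::vector<cv::String> types;
    net.getLayerTypes(types);
    for (size_t i = 0; i < types.size(); i++)
        std::cout << types[i] << ": " << net.getLayersCount(types[i]) << " layer(s)" << std::endl;
}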
138 changes: 138 additions & 0 deletions modules/dnn/src/dnn.cpp
@@ -876,6 +876,144 @@ void Net::getLayerShapes(const Net::Impl::ShapesVec& netInputShapes,
*outLayerShapes = shapes.out;
}

int64 Net::getFLOPS(const std::vector<MatShape>& netInputShapes) const
{
int64 flops = 0;
std::vector<int> ids;
std::vector<std::vector<MatShape> > inShapes, outShapes;
getLayersShapes(netInputShapes, &ids, &inShapes, &outShapes);
CV_Assert(inShapes.size() == outShapes.size());
CV_Assert(inShapes.size() == ids.size());

for(int i = 0; i < ids.size(); i++)
{
flops += impl->layers[ids[i]].getLayerInstance()->getFLOPS(inShapes[i],
outShapes[i]);
}

return flops;
}

int64 Net::getFLOPS(const MatShape& netInputShape) const
{
return getFLOPS(std::vector<MatShape>(1, netInputShape));
}

int64 Net::getFLOPS(const int layerId,
const std::vector<MatShape>& netInputShapes) const
{
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
CV_Assert(layer != impl->layers.end());

Impl::LayerShapes shapes;
impl->getLayerShapes(netInputShapes, layerId, shapes);

return layer->second.getLayerInstance()->getFLOPS(shapes.in, shapes.out);
}

int64 Net::getFLOPS(const int layerId,
const MatShape& netInputShape) const
{
return getFLOPS(layerId, std::vector<MatShape>(1, netInputShape));
}

void Net::getLayerTypes(std::vector<String>& layersTypes) const
{
layersTypes.clear();

std::map<String, int> layers;
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
it != impl->layers.end(); it++)
{
if (layers.find(it->second.type) == layers.end())
layers[it->second.type] = 0;
layers[it->second.type]++;
}

for (std::map<String, int>::iterator it = layers.begin();
it != layers.end(); it++)
{
layersTypes.push_back(it->first);
}
}

int Net::getLayersCount(const String& layerType) const
{
int count = 0;
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
it != impl->layers.end(); it++)
{
if (it->second.type == layerType)
count++;
}
return count;
}

void Net::getMemoryConsumption(const int layerId,
const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const
{
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
CV_Assert(layer != impl->layers.end());

weights = blobs = 0;

for(int i = 0; i < layer->second.params.blobs.size(); i++)
{
const Mat& weightsBlob = layer->second.params.blobs[i];
weights += weightsBlob.total()*weightsBlob.elemSize();
}

std::vector<MatShape> outLayerShapes;
getLayerShapes(netInputShapes, layerId, 0, &outLayerShapes);
for(int i = 0; i < outLayerShapes.size(); i++)
{
blobs += total(outLayerShapes[i]) * sizeof(float);
}
}

void Net::getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
size_t& weights, size_t& blobs) const
{
std::vector<int> layerIds;
std::vector<std::vector<MatShape> > outLayerShapes;

getLayersShapes(netInputShapes, &layerIds, 0, &outLayerShapes);

weights = blobs = 0;
for(int i = 0; i < layerIds.size(); i++)
{
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerIds[i]);
CV_Assert(layer != impl->layers.end());

for(int j = 0; j < layer->second.params.blobs.size(); j++)
{
const Mat& weightsBlob = layer->second.params.blobs[j];
weights += weightsBlob.total()*weightsBlob.elemSize();
}

for(int j = 0; j < outLayerShapes[i].size(); j++)
{
blobs += total(outLayerShapes[i][j]) * sizeof(float);
}
}
}

void Net::getMemoryConsumption(const int layerId,
const MatShape& netInputShape,
size_t& weights, size_t& blobs) const
{
getMemoryConsumption(layerId, std::vector<MatShape>(1, netInputShape),
weights, blobs);
}

void Net::getMemoryConsumption(const MatShape& netInputShape,
size_t& weights, size_t& blobs) const
{
getMemoryConsumption(std::vector<MatShape>(1, netInputShape),
weights, blobs);
}

//////////////////////////////////////////////////////////////////////////

Importer::~Importer() {}
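The blob estimate above assumes every intermediate blob is stored as 32-bit floats (sizeof(float)); a quick sanity check of that arithmetic for one hypothetical 1x64x112x112 blob, using the same total() helper the implementation calls (from shape_utils.hpp):

// Rough check of the per-blob memory estimate; the shape is hypothetical.
#include <opencv2/dnn/shape_utils.hpp>
#include <iostream>

int main()
{
    int dims[] = {1, 64, 112, 112};
    cv::dnn::MatShape shape(dims, dims + 4);

    // total() multiplies the dimensions: 1*64*112*112 = 802816 elements
    size_t bytes = cv::dnn::total(shape) * sizeof(float);  // 3211264 bytes, ~3.1 MiB
    std::cout << bytes << " bytes" << std::endl;
    return 0;
}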
14 changes: 14 additions & 0 deletions modules/dnn/src/layers/batch_norm_layer.cpp
@@ -10,6 +10,7 @@ Implementation of Batch Normalization layer.
*/

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cv
{
@@ -78,6 +79,19 @@ class BatchNormLayerImpl : public BatchNormLayer
}
}

virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning

int64 flops = 0;
for(int i = 0; i < inputs.size(); i++)
{
flops += 3*total(inputs[i]);
}
return flops;
}

bool hasWeights, hasBias;
float epsilon;
};
30 changes: 30 additions & 0 deletions modules/dnn/src/layers/convolution_layer.cpp
@@ -224,6 +224,20 @@ class ConvolutionLayerImpl : public BaseConvolutionLayerImpl
dilation.height, dilation.width, outH, outW, dstRow.ptr<float>());
}
}

virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
CV_Assert(inputs.size() == outputs.size());

int64 flops = 0;
for (int i = 0; i < inputs.size(); i++)
{
flops += total(outputs[i])*(2*kernel.area()*inputs[i][1] + 1);
}

return flops;
}
};

class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl
@@ -339,6 +353,22 @@ class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl
dilation.height, dilation.width, dstImg.ptr<float>(), &ofsbuf[0]);
}

virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
CV_Assert(inputs.size() == outputs.size());

int64 flops = 0;
int outChannels = blobs[0].size[0];

for (int i = 0; i < inputs.size(); i++)
{
flops += 2*outChannels*kernel.area()*total(inputs[i]);
}

return flops;
}

std::vector<int> ofsbuf;
};

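To make the convolution estimate concrete, a back-of-the-envelope check of total(output) * (2*kernel.area()*inputChannels + 1), with every shape assumed purely for illustration (3x3 kernel, 64 input channels, 1x128x56x56 output):

// Worked example of the convolution FLOPS formula; all shapes are hypothetical.
#include <iostream>

int main()
{
    long long outTotal   = 1LL * 128 * 56 * 56;      // output elements (N*C*H*W) = 401408
    long long perElement = 2LL * (3 * 3) * 64 + 1;   // multiply+add per weight, plus bias = 1153

    std::cout << outTotal * perElement << std::endl; // 462823424, roughly 0.46 GFLOP
    return 0;
}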
39 changes: 39 additions & 0 deletions modules/dnn/src/layers/elementwise_layers.cpp
@@ -63,6 +63,17 @@ class ElementWiseLayer : public Func::Layer
}
}

virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
int64 flops = 0;
for (int i = 0; i < outputs.size(); i++)
{
flops += total(outputs[i]) * func.getFLOPSPerElement();
}
return flops;
}

Func func;
bool run_parallel;
};
@@ -79,6 +90,8 @@ struct ReLUFunctor
{
return (x >= (TFloat)0) ? x : (TFloat)slope * x;
}

int64 getFLOPSPerElement() const {return 1;}
};

struct TanHFunctor
@@ -90,6 +103,8 @@ struct TanHFunctor
{
return tanh(x);
}

int64 getFLOPSPerElement() const {return 1;}
};

struct SigmoidFunctor
@@ -101,6 +116,8 @@ struct SigmoidFunctor
{
return (TFloat)1 / ((TFloat)1 + exp(-x));
}

int64 getFLOPSPerElement() const {return 3;}
};

struct AbsValFunctor
@@ -112,6 +129,8 @@ struct AbsValFunctor
{
return abs(x);
}

int64 getFLOPSPerElement() const {return 1;}
};

struct BNLLFunctor
@@ -123,6 +142,8 @@ struct BNLLFunctor
{
return log((TFloat)1 + exp(-abs(x)));
}

int64 getFLOPSPerElement() const {return 5;}
};

struct PowerFunctor
@@ -141,6 +162,8 @@ struct PowerFunctor
{
return pow((TFloat)shift + (TFloat)scale * x, (TFloat)power);
}

int64 getFLOPSPerElement() const {return 3;}
};

struct PowerFunctor1
@@ -158,6 +181,8 @@ struct PowerFunctor1
{
return (TFloat)shift + (TFloat)scale * x;
}

int64 getFLOPSPerElement() const {return 2;}
};

class ChannelsPReLULayerImpl : public ChannelsPReLULayer
@@ -210,6 +235,20 @@ class ChannelsPReLULayerImpl : public ChannelsPReLULayer
}
}
}

virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)inputs; // suppress unused variable warning
int64 flops = 0;

for (int i = 0; i < outputs.size(); i++)
{
flops += total(outputs[i]) * 3;
}

return flops;
}
};

#define ACTIVATION_CREATOR_FOR(_Layer, _Functor, ...) \
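The activation layers charge a fixed per-element cost (1 for ReLU/TanH/AbsVal, 2 for PowerFunctor1, 3 for Sigmoid/Power/ChannelsPReLU, 5 for BNLL) multiplied by the output element count; for example, with a hypothetical 1x64x56x56 output:

// Illustration of the per-element activation costs; the output shape is hypothetical.
#include <iostream>

int main()
{
    long long elements = 1LL * 64 * 56 * 56;                 // 200704 output elements

    std::cout << "ReLU:    " << elements * 1 << std::endl;   // 200704 FLOPs
    std::cout << "Sigmoid: " << elements * 3 << std::endl;   // 602112 FLOPs
    std::cout << "BNLL:    " << elements * 5 << std::endl;   // 1003520 FLOPs
    return 0;
}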
11 changes: 11 additions & 0 deletions modules/dnn/src/layers/eltwise_layer.cpp
@@ -143,6 +143,17 @@ class EltwiseLayerImpl : public EltwiseLayer
break;
}
}

virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const
{
(void)outputs; // suppress unused variable warning
CV_Assert(inputs.size());

int64 flops = inputs.size() * total(inputs[0]);

return flops;
}
};

Ptr<EltwiseLayer> EltwiseLayer::create(const LayerParams& params)