Skip to content

Commit

Permalink
Made separate functions for computing output shapes for all layers. R…
Browse files Browse the repository at this point in the history
…emoved output blobs allocation from layers
  • Loading branch information
arrybn committed May 23, 2017
1 parent 27bf9e2 commit 9b73fee
Show file tree
Hide file tree
Showing 37 changed files with 1,102 additions and 897 deletions.
18 changes: 2 additions & 16 deletions modules/dnn/include/opencv2/dnn/all_layers.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -121,21 +121,7 @@ namespace dnn
* @details If this parameter is empty or unset then @p outTailShape = [`Wh`.size(0)] will be used,
* where `Wh` is parameter from setWeights().
*/
virtual void setOutShape(const std::vector<int> &outTailShape = std::vector<int>()) = 0;

/** @brief Set @f$ h_{t-1} @f$ value that will be used in next forward() calls.
* @details By-default @f$ h_{t-1} @f$ is inited by zeros and updated after each forward() call.
*/
virtual void setH(const Mat &H) = 0;
/** @brief Returns current @f$ h_{t-1} @f$ value (deep copy). */
virtual Mat getH() const = 0;

/** @brief Set @f$ c_{t-1} @f$ value that will be used in next forward() calls.
* @details By-default @f$ c_{t-1} @f$ is inited by zeros and updated after each forward() call.
*/
virtual void setC(const Mat &C) = 0;
/** @brief Returns current @f$ c_{t-1} @f$ value (deep copy). */
virtual Mat getC() const = 0;
virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0;

* @brief Specifies whether to interpret the first dimension of the input blob as a timestamp dimension or as a sample.
*
Expand Down Expand Up @@ -289,7 +275,7 @@ namespace dnn
class CV_EXPORTS ReshapeLayer : public Layer
{
public:
std::vector<int> newShapeDesc;
MatShape newShapeDesc;
Range newShapeRange;

static Ptr<ReshapeLayer> create(const LayerParams& params);
Expand Down
70 changes: 59 additions & 11 deletions modules/dnn/include/opencv2/dnn/dnn.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@ namespace dnn //! This namespace is used for dnn module functionality.
//! @addtogroup dnn
//! @{

typedef std::vector<int> MatShape;

/** @brief Initialize dnn module and built-in layers.
*
* This function automatically called on most of OpenCV builds,
Expand Down Expand Up @@ -87,33 +89,35 @@ namespace dnn //! This namespace is used for dnn module functionality.
//! List of learned parameters must be stored here to allow read them by using Net::getParam().
CV_PROP_RW std::vector<Mat> blobs;

/** @brief Allocates internal buffers and output blobs with respect to the shape of inputs.
/** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @param[in] input vector of already allocated input blobs
* @param[out] output vector of output blobs, which must be allocated
* @param[out] output vector of already allocated output blobs
*
* This method must create each produced blob according to shape of @p input blobs and internal layer params.
* If this method is called for the first time then the @p output vector consists of empty blobs and its size is determined by the number of output connections.
* This method can be called multiple times if size of any @p input blob was changed.
* This method is called after the network has allocated all memory for input and output blobs
* and before inferencing.
*/
virtual void allocate(const std::vector<Mat*> &input, std::vector<Mat> &output) = 0;
virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);

/** @brief Given the @p input blobs, computes the output @p blobs.
* @param[in] input the input blobs.
* @param[out] output allocated output blobs, which will store results of the computation.
* @param[out] internals allocated internal blobs
*/
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output) = 0;
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) = 0;

/** @brief @overload */
CV_WRAP void allocate(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
CV_WRAP void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);

/** @brief @overload */
CV_WRAP std::vector<Mat> allocate(const std::vector<Mat> &inputs);
CV_WRAP std::vector<Mat> finalize(const std::vector<Mat> &inputs);

/** @brief @overload */
CV_WRAP void forward(const std::vector<Mat> &inputs, CV_IN_OUT std::vector<Mat> &outputs);
CV_WRAP void forward(const std::vector<Mat> &inputs, CV_IN_OUT std::vector<Mat> &outputs,
CV_IN_OUT std::vector<Mat> &internals);

/** @brief Allocates layer and computes output. */
CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
CV_WRAP void run(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
CV_IN_OUT std::vector<Mat> &internals);

/** @brief Returns index of input blob into the input array.
* @param inputName label of input blob
Expand All @@ -127,6 +131,11 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
*/
virtual int outputNameToIndex(String outputName);

virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const;

CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
CV_PROP String type; //!< Type name which was used for creating layer by layer factory.

Expand Down Expand Up @@ -275,6 +284,45 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
/** @brief Returns indexes of layers with unconnected outputs.
*/
CV_WRAP std::vector<int> getUnconnectedOutLayers() const;
/** @brief Returns input and output shapes for all layers in loaded model;
* preliminary inferencing isn't necessary.
* @param netInputShapes shapes for all input blobs in net input layer.
* @param layersIds output parameter for layer IDs.
* @param inLayersShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayersShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;

/** @overload */
CV_WRAP void getLayersShapes(const MatShape& netInputShape,
std::vector<int>* layersIds,
std::vector<std::vector<MatShape> >* inLayersShapes,
std::vector<std::vector<MatShape> >* outLayersShapes) const;

/** @brief Returns input and output shapes for layer with specified
* id in loaded model; preliminary inferencing isn't necessary.
* @param netInputShape shape input blob in net input layer.
* @param layerId id for layer.
* @param inLayerShapes output parameter for input layers shapes;
* order is the same as in layersIds
* @param outLayerShapes output parameter for output layers shapes;
* order is the same as in layersIds
*/
CV_WRAP void getLayerShapes(const MatShape& netInputShape,
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;

/** @overload */
CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
const int layerId,
std::vector<MatShape>* inLayerShapes,
std::vector<MatShape>* outLayerShapes) const;
private:

struct Impl;
Expand Down
88 changes: 68 additions & 20 deletions modules/dnn/include/opencv2/dnn/shape_utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,22 +55,6 @@ inline std::ostream &operator<< (std::ostream &s, cv::Range &r)
return s << "[" << r.start << ", " << r.end << ")";
}

//Reshaping
//TODO: add -1 specifier for automatic size inferring

/*template<typename Mat>
void reshape(Mat &m, const BlobShape &shape)
{
m = m.reshape(1, shape.dims(), shape.ptr());
}
template<typename Mat>
Mat reshaped(const Mat &m, const BlobShape &shape)
{
return m.reshape(1, shape.dims(), shape.ptr());
}*/


//Slicing

struct _Range : public cv::Range
Expand Down Expand Up @@ -139,12 +123,76 @@ static inline Mat getPlane(const Mat &m, int n, int cn)
return m(range).reshape(1, m.dims-2, sz);
}

static inline size_t shapeTotal(const std::vector<int>& shape)
/** @brief Builds a MatShape from a raw array of @p n dimension sizes. */
static inline MatShape shape(const int* dims, const int n = 4)
{
    // Construct directly from the iterator range [dims, dims + n).
    return MatShape(dims, dims + n);
}

/** @brief Converts a cv::MatSize (the per-dimension sizes of a Mat) to a MatShape. */
static inline MatShape shape(const MatSize& size)
{
    // MatSize exposes its dimension sizes as a plain int array.
    const int* dims = (const int*)size;
    return shape(dims, size.dims());
}

/** @brief Returns the shape of @p mat as a MatShape (vector of dimension sizes). */
static inline MatShape shape(const Mat& mat)
{
    const MatSize& sizes = mat.size;
    return shape(sizes);
}

// Predicate used by shape(a0, a1, a2, a3) to drop unset (-1) trailing dimensions.
// 'static' gives the helper internal linkage; an unnamed namespace in a header creates
// a distinct entity in every including translation unit and can trigger ODR-related warnings.
static inline bool is_neg(int i) { return i < 0; }

/** @brief Builds a MatShape from up to four dimension sizes.
 *  Negative (unset) arguments are filtered out, so shape(2, 3) yields a 2-element shape.
 */
static inline MatShape shape(int a0, int a1=-1, int a2=-1, int a3=-1)
{
    const int vals[] = {a0, a1, a2, a3};
    MatShape s;
    for (int i = 0; i < 4; i++)
    {
        // Keep only the dimensions that were actually provided.
        if (!is_neg(vals[i]))
            s.push_back(vals[i]);
    }
    return s;
}

/** @brief Computes the product of the shape dimensions in the half-open range [start, end).
 *  @param shape shape to reduce
 *  @param start index of the first dimension; -1 (default) means 0
 *  @param end   one past the last dimension; -1 (default) means shape.size()
 *  @return number of elements, or 0 for an empty shape
 */
static inline int total(const MatShape& shape, int start = -1, int end = -1)
{
    if (start == -1) start = 0;
    if (end == -1) end = (int)shape.size();  // explicit cast avoids implicit size_t -> int narrowing

    if (shape.empty())
        return 0;

    int elems = 1;
    // Casts keep the comparisons signed/signed (silences -Wsign-compare and avoids
    // surprising unsigned promotion if start/end were ever negative here).
    CV_Assert(start < (int)shape.size() && end <= (int)shape.size() &&
              start <= end);
    for(int i = start; i < end; i++)
    {
        elems *= shape[i];
    }
    return elems;
}

/** @brief Concatenates two shapes: the result contains all dimensions of @p a
 *  followed by all dimensions of @p b.
 */
static inline MatShape concat(const MatShape& a, const MatShape& b)
{
    // NOTE(review): the previous text contained interleaved leftovers from the removed
    // shapeTotal() implementation (an undefined `shape` variable, a `return p;` of the
    // wrong type, and an unreachable second return) — it could not compile. This is the
    // intended concatenation logic only.
    MatShape c = a;
    c.insert(c.end(), b.begin(), b.end());

    return c;
}

/** @brief Debugging helper: prints @p shape to stdout as "name: [ d0 d1 ... ]". */
inline void print(const MatShape& shape, const String& name = "")
{
    printf("%s: [", name.c_str());
    for (size_t i = 0; i < shape.size(); i++)
        printf(" %d", shape[i]);
    printf(" ]\n");
}

/** @brief Normalizes a possibly-negative axis index: negative values count from the end. */
inline int clamp(int ax, int dims)
{
    if (ax < 0)
        return ax + dims;
    return ax;
}

/** @brief Overload: normalizes @p ax against the number of dimensions in @p shape. */
inline int clamp(int ax, const MatShape& shape)
{
    const int dims = static_cast<int>(shape.size());
    return clamp(ax, dims);
}

}
Expand Down
2 changes: 2 additions & 0 deletions modules/dnn/misc/python/pyopencv_dnn.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
#ifdef HAVE_OPENCV_DNN
typedef dnn::DictValue LayerId;
typedef std::vector<dnn::MatShape> vector_MatShape;
typedef std::vector<std::vector<dnn::MatShape> > vector_vector_MatShape;

template<>
bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
Expand Down
28 changes: 21 additions & 7 deletions modules/dnn/perf/perf_convolution.cpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#include "perf_precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cvtest
{
Expand All @@ -21,14 +22,14 @@ CV_ENUM(GroupSize, GROUP_OFF, GROUP_2);
//Squared Size
#define SSZ(n) cv::Size(n, n)

typedef std::pair<std::vector<int>, int> InpShapeNumOut;
typedef std::pair<MatShape, int> InpShapeNumOut;
typedef tuple<Size, InpShapeNumOut, GroupSize, StrideSize> ConvParam; //kernel_size, inp shape, groups, stride
typedef TestBaseWithParam<ConvParam> ConvolutionPerfTest;

static inline std::vector<int> blobShape(int count, int nplanes, int height, int width)
static inline MatShape blobShape(int count, int nplanes, int height, int width)
{
int data[] = {count, nplanes, height, width};
return std::vector<int>(data, data+4);
return MatShape(data, data+4);
}

PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
Expand All @@ -44,7 +45,7 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(

ConvParam params = GetParam();
int ksz = get<0>(params).width;
std::vector<int> inpShape = get<1>(params).first;
MatShape inpShape = get<1>(params).first;
int outCn = get<1>(params).second;
int groups = get<2>(params);
int stride = (ksz >= 11) ? 4 : (int)get<3>(params);
Expand All @@ -69,12 +70,25 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(
lp.blobs.push_back(biasBlob);

std::vector<Mat*> inpBlobs(1, &inpBlob);
std::vector<Mat> outBlobs;
std::vector<Mat> outBlobs, internalBlobs;

cv::setNumThreads(cv::getNumberOfCPUs());

Ptr<Layer> layer = cv::dnn::LayerFactory::createLayerInstance("Convolution", lp);
layer->allocate(inpBlobs, outBlobs);
std::vector<MatShape> inputShapes(1, shape(inpBlob)), outShapes, internals;
layer->getMemoryShapes(inputShapes, 0, outShapes, internals);
for (int i = 0; i < outShapes.size(); i++)
{
outBlobs.push_back(Mat(outShapes[i], CV_32F));
}
for (int i = 0; i < internals.size(); i++)
{
internalBlobs.push_back(Mat());
if (total(internals[i]))
internalBlobs.back().create(internals[i], CV_32F);
}

layer->finalize(inpBlobs, outBlobs);

Mat inpBlob2D = inpBlob.reshape(1, outCn);
Mat wgtBlob2D = wgtBlob.reshape(1, outCn*(inpCn/groups));
Expand All @@ -83,7 +97,7 @@ PERF_TEST_P( ConvolutionPerfTest, perf, Combine(

TEST_CYCLE_N(10)
{
layer->forward(inpBlobs, outBlobs);
layer->forward(inpBlobs, outBlobs, internalBlobs);
}

SANITY_CHECK_NOTHING();
Expand Down
4 changes: 2 additions & 2 deletions modules/dnn/src/caffe/caffe_importer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ class CaffeImporter : public Importer
}
}

void blobShapeFromProto(const caffe::BlobProto &pbBlob, std::vector<int>& shape)
void blobShapeFromProto(const caffe::BlobProto &pbBlob, MatShape& shape)
{
shape.clear();
if (pbBlob.has_num() || pbBlob.has_channels() || pbBlob.has_height() || pbBlob.has_width())
Expand All @@ -215,7 +215,7 @@ class CaffeImporter : public Importer

void blobFromProto(const caffe::BlobProto &pbBlob, cv::Mat &dstBlob)
{
std::vector<int> shape;
MatShape shape;
blobShapeFromProto(pbBlob, shape);

dstBlob.create((int)shape.size(), &shape[0], CV_32F);
Expand Down
Loading

0 comments on commit 9b73fee

Please sign in to comment.