
Commit cee9346
add some comments
1 parent f13aeb5 commit cee9346

5 files changed, +50 -16 lines changed

paddle/function/cross_map_normal_op.cpp

Lines changed: 4 additions & 1 deletion

@@ -17,7 +17,6 @@ limitations under the License. */
 
 namespace paddle {
 
-// NCHW
 template <>
 void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs,
                                      real* denoms,
@@ -36,6 +35,10 @@ void CrossMapNormal<DEVICE_TYPE_CPU>(real* outputs,
   CpuVector inputsV(numSamples * oneSample, inputs);
   CpuVector denomsV(numSamples * oneSample, denoms);
 
+  // f(x) = x * ( 1 + scale * SUM((x)^2) )^(-pow)
+  // x represents inputs
+  // f(x) represents outputs
+  // denoms saves the intermediate result for backward
   denomsV = denomsV.constant(1.0);
   const int start = -((int)size - 1) / 2;
   const int end = (int)size + start;
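
The comment added above states the forward computation: each output is its input multiplied by (1 + scale * sum of squared inputs over a window of neighboring channels) raised to the power -pow, with the denominator term kept in denoms for the backward pass. For reference, a minimal standalone sketch of that formula on an NCHW buffer follows; it is plain C++ for illustration only, and the function name crossMapNormalRef and its std::vector interface are assumptions, not the Paddle kernel.

#include <cmath>
#include <cstddef>
#include <vector>

// Reference sketch of f(x) = x * (1 + scale * SUM(x^2))^(-pow), where the
// sum runs over a window of `size` neighboring channels of an NCHW buffer.
// Illustrative only; not the Paddle implementation.
void crossMapNormalRef(std::vector<float>& outputs,
                       std::vector<float>& denoms,
                       const std::vector<float>& inputs,
                       size_t numSamples, size_t channels,
                       size_t height, size_t width,
                       int size, float scale, float pow) {
  const int start = -((size - 1) / 2);
  const int end = size + start;
  const size_t plane = height * width;
  for (size_t n = 0; n < numSamples; ++n) {
    for (size_t c = 0; c < channels; ++c) {
      for (size_t i = 0; i < plane; ++i) {
        float sum = 0.0f;
        for (int o = start; o < end; ++o) {
          const int cc = static_cast<int>(c) + o;
          if (cc < 0 || cc >= static_cast<int>(channels)) continue;
          const float v = inputs[(n * channels + cc) * plane + i];
          sum += v * v;
        }
        const size_t idx = (n * channels + c) * plane + i;
        denoms[idx] = 1.0f + scale * sum;  // intermediate result for backward
        outputs[idx] = inputs[idx] * std::pow(denoms[idx], -pow);
      }
    }
  }
}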

paddle/function/cross_map_normal_op.h

Lines changed: 34 additions & 0 deletions

@@ -18,6 +18,22 @@ limitations under the License. */
 
 namespace paddle {
 
+/**
+ * \brief Cross map response normalize forward.
+ *        The data structure of image data is NCHW.
+ *
+ * \param[out] outputs output data.
+ * \param[out] denoms denoms buffer.
+ * \param[in] inputs input data.
+ * \param[in] numSamples batch size of input image.
+ * \param[in] channels number of channels.
+ * \param[in] height image height.
+ * \param[in] width image width.
+ * \param[in] size size.
+ * \param[in] scale scale.
+ * \param[in] pow pow.
+ *
+ */
 template <DeviceType Device>
 void CrossMapNormal(real* outputs,
                     real* denoms,
@@ -30,6 +46,24 @@ void CrossMapNormal(real* outputs,
                     real scale,
                     real pow);
 
+/**
+ * \brief Cross map response normalize backward.
+ *        The data structure of image data is NCHW.
+ *
+ * \param[out] inputsGrad input grad.
+ * \param[in] inputsValue input value.
+ * \param[in] outputsValue output value.
+ * \param[in] outputsGrad output grad.
+ * \param[in] denoms denoms buffer.
+ * \param[in] numSamples batch size of input image.
+ * \param[in] channels number of channels.
+ * \param[in] height image height.
+ * \param[in] width image width.
+ * \param[in] size size.
+ * \param[in] scale scale.
+ * \param[in] pow pow.
+ *
+ */
 template <DeviceType Device>
 void CrossMapNormalGrad(real* inputsGrad,
                         real* inputsValue,
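
Given the declarations and the Doxygen comments above, a hypothetical call of the CPU specialization might look like the sketch below. The argument order is inferred from the parameter list in the comment, and the concrete sizes, window, scale, and pow values are illustrative assumptions (real is Paddle's floating-point typedef).

// Hypothetical usage sketch; values and buffer setup are assumptions.
const size_t numSamples = 2, channels = 8, height = 16, width = 16;
const size_t total = numSamples * channels * height * width;
std::vector<real> inputs(total), outputs(total), denoms(total);
// ... fill `inputs` with NCHW image data ...
CrossMapNormal<DEVICE_TYPE_CPU>(outputs.data(),
                                denoms.data(),
                                inputs.data(),
                                numSamples,
                                channels,
                                height,
                                width,
                                /*size=*/5,
                                /*scale=*/1e-4,
                                /*pow=*/0.75);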

paddle/gserver/layers/Layer.h

Lines changed: 6 additions & 0 deletions

@@ -18,6 +18,7 @@ limitations under the License. */
 #include <functional>
 #include <memory>
 #include "ModelConfig.pb.h"
+#include "paddle/function/Function.h"
 #include "paddle/math/CpuSparseMatrix.h"
 #include "paddle/parameter/Parameter.h"
 #include "paddle/utils/ClassRegistrar.h"
@@ -100,6 +101,11 @@ class Layer {
   /// Mark input grad in(true) or out(false) of backward function.
   std::vector<bool> markInBackward_;
 
+  /// Layer forward function
+  FunctionBase* forward_;
+  /// Layer backward function
+  FunctionBase* backward_;
+
 public:
   /**
    * Wait until all input value ready.

paddle/gserver/layers/NormProjectionLayer.cpp

Lines changed: 6 additions & 12 deletions

@@ -48,20 +48,17 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
   if (useGpu_) {
     forward_ = FunctionBase::funcRegistrar_.createByType(
         FUNC_NAME(CrossMapNormal, GPU));
+    backward_ = FunctionBase::funcRegistrar_.createByType(
+        FUNC_NAME(CrossMapNormalGrad, GPU));
   } else {
     forward_ = FunctionBase::funcRegistrar_.createByType(
         FUNC_NAME(CrossMapNormal, CPU));
+    backward_ = FunctionBase::funcRegistrar_.createByType(
+        FUNC_NAME(CrossMapNormalGrad, CPU));
   }
   forward_->init(
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
 
-  if (useGpu_) {
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, GPU));
-  } else {
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, CPU));
-  }
   backward_->init(
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
 
@@ -74,18 +71,15 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   /* malloc memory for the output_ if necessary */
   /* note: one sample correspond to one row */
   MatrixPtr input = inputLayers_[0]->getOutputValue();
-  int batchSize = input->getHeight();
+  size_t batchSize = input->getHeight();
   int size = getSize();
   resetOutput(batchSize, size);
 
   MatrixPtr outV = getOutputValue();
 
   Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_);
 
-  dims_ = {(size_t)batchSize,
-           (size_t)channels_,
-           (size_t)imgSizeH_,
-           (size_t)imgSizeW_};
+  dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_};
   forward_->calc(
       {Tensor(input->getData(), dims_)},
       {Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)},
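
The init/forward changes above reduce to a three-step pattern: create the device-specific function through the registrar, configure it once with a FuncConfig, then call calc with Tensor views over the layer's buffers. A condensed sketch of the CPU path follows, using only names that appear in this diff; the empty third argument list passed to calc is an assumption, since the diff is truncated at that point.

// Create the CPU forward function by name through the registrar.
FunctionBase* forward = FunctionBase::funcRegistrar_.createByType(
    FUNC_NAME(CrossMapNormal, CPU));
// Configure its hyper-parameters once, at init time.
forward->init(
    FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));

// At forward time, wrap the raw buffers as Tensors carrying the NCHW dims.
Dims dims = {batchSize, channels_, imgSizeH_, imgSizeW_};
forward->calc({Tensor(input->getData(), dims)},
              {Tensor(outV->getData(), dims), Tensor(denoms_->getData(), dims)},
              {});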

paddle/gserver/layers/NormProjectionLayer.h

Lines changed: 0 additions & 3 deletions

@@ -16,7 +16,6 @@ limitations under the License. */
 
 #include <vector>
 #include "NormLayer.h"
-#include "paddle/function/Function.h"
 #include "paddle/math/Matrix.h"
 
 namespace paddle {
@@ -43,7 +42,5 @@ class CMRProjectionNormLayer : public ResponseNormLayer {
 
 protected:
   Dims dims_;
-  FunctionBase* forward_;
-  FunctionBase* backward_;
 };
 } // namespace paddle
