@@ -48,20 +48,17 @@ bool CMRProjectionNormLayer::init(const LayerMap& layerMap,
   if (useGpu_) {
     forward_ = FunctionBase::funcRegistrar_.createByType(
         FUNC_NAME(CrossMapNormal, GPU));
+    backward_ = FunctionBase::funcRegistrar_.createByType(
+        FUNC_NAME(CrossMapNormalGrad, GPU));
   } else {
     forward_ = FunctionBase::funcRegistrar_.createByType(
         FUNC_NAME(CrossMapNormal, CPU));
+    backward_ = FunctionBase::funcRegistrar_.createByType(
+        FUNC_NAME(CrossMapNormalGrad, CPU));
   }
   forward_->init(
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
 
-  if (useGpu_) {
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, GPU));
-  } else {
-    backward_ = FunctionBase::funcRegistrar_.createByType(
-        FUNC_NAME(CrossMapNormalGrad, CPU));
-  }
   backward_->init(
       FuncConfig().set("size", size_).set("scale", scale_).set("pow", pow_));
 
@@ -74,18 +71,15 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   /* malloc memory for the output_ if necessary */
   /* note: one sample correspond to one row */
   MatrixPtr input = inputLayers_[0]->getOutputValue();
-  int batchSize = input->getHeight();
+  size_t batchSize = input->getHeight();
   int size = getSize();
   resetOutput(batchSize, size);
 
   MatrixPtr outV = getOutputValue();
 
   Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_);
 
-  dims_ = {(size_t)batchSize,
-           (size_t)channels_,
-           (size_t)imgSizeH_,
-           (size_t)imgSizeW_};
+  dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_};
   forward_->calc(
       {Tensor(input->getData(), dims_)},
       {Tensor(outV->getData(), dims_), Tensor(denoms_->getData(), dims_)},
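
For context on what the "size", "scale", and "pow" config keys parametrize, below is a minimal reference-style sketch of cross-map (local response) normalization for a single sample in [channels][height][width] layout. It is an illustration under the usual LRN formulation (denominator 1 + scale * sum of squares over a window of `size` neighbouring channels, output scaled by denominator^(-pow)); the actual CrossMapNormal kernel may differ in its indexing and denominator bookkeeping, and the function and variable names here are not Paddle APIs.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Simplified cross-map normalization reference for one sample.
// `in`, `out`, and `denoms` all hold channels * imgH * imgW floats.
void crossMapNormalRef(const std::vector<float>& in,
                       std::vector<float>& out,
                       std::vector<float>& denoms,
                       size_t channels, size_t imgH, size_t imgW,
                       size_t size, float scale, float pow) {
  const size_t spatial = imgH * imgW;
  out.resize(in.size());
  denoms.resize(in.size());
  const size_t half = size / 2;
  for (size_t c = 0; c < channels; ++c) {
    for (size_t s = 0; s < spatial; ++s) {
      // Sum of squares over a window of `size` neighbouring channels
      // centred on channel c, clipped at the channel boundaries.
      float accum = 0.f;
      size_t lo = (c < half) ? 0 : c - half;
      size_t hi = std::min(channels - 1, c + half);
      for (size_t k = lo; k <= hi; ++k) {
        float v = in[k * spatial + s];
        accum += v * v;
      }
      // Denominator is kept (as denoms_ is in the layer) so the backward
      // pass can reuse it instead of recomputing the window sums.
      float denom = 1.f + scale * accum;
      denoms[c * spatial + s] = denom;
      out[c * spatial + s] = in[c * spatial + s] * std::pow(denom, -pow);
    }
  }
}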