@@ -316,7 +316,7 @@ void LstmLayer::forwardSequence(int batchSize,
     }
     if (prevOutput_) {
       frameGate->setData(lstmValue.gateValue);
-      frameGate->mul(prevOutput_, weight_->getW(), 1, 1);
+      frameGate->mul(*prevOutput_, *weight_->getW(), 1, 1);
     }
   }
   AsyncGpuBlock asyncGpuBlock;
@@ -338,7 +338,7 @@ void LstmLayer::forwardSequence(int batchSize,
         frameOutput->setData(lstmValue.outputValue);
         nextFrame(reversed_, getSize());
         frameGate->setData(lstmValue.gateValue);
-        frameGate->mul(frameOutput, weight_->getW(), 1, 1);
+        frameGate->mul(*frameOutput, *weight_->getW(), 1, 1);
       }
     }
     if (n != numSequences - 1) {
@@ -348,7 +348,7 @@ void LstmLayer::forwardSequence(int batchSize,
       if (!reversed_) {
         if (!prevState_) lstmValue.prevStateValue = nullptr;
         if (prevOutput_) {
-          frameGate->mul(frameOutput, weight_->getW(), 1, 1);
+          frameGate->mul(*frameOutput, *weight_->getW(), 1, 1);
         }
       } else {
         lstmValue.prevStateValue = nullptr;
@@ -470,7 +470,7 @@ void LstmLayer::backwardSequence(int batchSize,
         frameGate->setData(lstmGrad.gateGrad);
         nextFrame(reversed_, getSize());
         frameOutput->setData(lstmGrad.outputGrad);
-        frameOutput->mul(frameGate, weightT, 1, 1);
+        frameOutput->mul(*frameGate, *weightT, 1, 1);
       } else {
         nextFrame(reversed_, getSize());
       }
@@ -479,14 +479,14 @@ void LstmLayer::backwardSequence(int batchSize,
   if (weight_->getWGrad()) {
     if (!reversed_) {
       weight_->getWGrad()->mul(
-          output_.value->subMatrix(start, length - 1)->getTranspose(),
-          gate_.grad->subMatrix(start + 1, length - 1),
+          *output_.value->subMatrix(start, length - 1)->getTranspose(),
+          *gate_.grad->subMatrix(start + 1, length - 1),
           1,
           1);
     } else {
       weight_->getWGrad()->mul(
-          output_.value->subMatrix(start + 1, length - 1)->getTranspose(),
-          gate_.grad->subMatrix(start, length - 1),
+          *output_.value->subMatrix(start + 1, length - 1)->getTranspose(),
+          *gate_.grad->subMatrix(start, length - 1),
           1,
           1);
     }
@@ -541,15 +541,15 @@ void LstmLayer::forwardBatch(int batchSize,

     if (n != 0) {
       MatrixPtr batch1 = batchValue_->getBatchValue(n - 1, batchSize);
-      gateValue->mul(batch1, weight_->getW(), 1, 1);
+      gateValue->mul(*batch1, *weight_->getW(), 1, 1);
     } else if (prevOutput_) {
       Matrix::resizeOrCreate(prevBatchOutput2_,
                              gateValue->getHeight(),
                              getSize(),
                              false,
                              useGpu_);
       batchValue_->prevOutput2Batch(*prevOutput_, *prevBatchOutput2_);
-      gateValue->mul(prevBatchOutput2_, weight_->getW(), 1, 1);
+      gateValue->mul(*prevBatchOutput2_, *weight_->getW(), 1, 1);

       batchValue_->prevOutput2Batch(*prevState_,
                                     *totalState_->subMatrix(0, numSequences));
@@ -672,16 +672,16 @@ void LstmLayer::backwardBatch(int batchSize,

     if (n != 0) {
       MatrixPtr tmp = batchGrad_->getBatchValue(n - 1, batchSize);
-      tmp->mul(gateGrad, weightT, 1, 1);
+      tmp->mul(*gateGrad, *weightT, 1, 1);
     }

     if (n != 0 && weight_->getWGrad()) {
       /* backward weight */
       MatrixPtr outputValue = batchValue_->getBatchValue(n - 1, batchSize);
-      weight_->getWGrad()->mul(outputValue->getTranspose(), gateGrad, 1, 1);
+      weight_->getWGrad()->mul(*outputValue->getTranspose(), *gateGrad, 1, 1);
     } else if (prevOutput_ && weight_->getWGrad()) {
       weight_->getWGrad()->mul(
-          prevBatchOutput2_->getTranspose(), gateGrad, 1, 1);
+          *prevBatchOutput2_->getTranspose(), *gateGrad, 1, 1);
     }
   }
 }
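
Note: every hunk above applies the same mechanical change. Matrix::mul previously accepted MatrixPtr (shared-pointer) operands and now takes Matrix references, so each call site dereferences its arguments; the trailing 1, 1 arguments are the scale factors, so the product is accumulated into the destination rather than overwriting it. Below is a minimal sketch of the assumed old and new overloads; the types and the accumulate semantics are inferred from the diff, not copied from the PaddlePaddle headers.

    #include <memory>

    using real = float;

    struct Matrix;
    using MatrixPtr = std::shared_ptr<Matrix>;

    struct Matrix {
      // Old style (removed): pointer operands, so null handling was left
      // to the callee.
      // void mul(const MatrixPtr a, const MatrixPtr b, real scaleAB, real scaleT);

      // New style (added): reference operands; the caller must hold a valid
      // matrix before writing, e.g.
      //   frameGate->mul(*prevOutput_, *weight_->getW(), 1, 1);
      // Assumed semantics: this = scaleAB * (a * b) + scaleT * this, so
      // scaleAB = scaleT = 1 accumulates the product into the gate values.
      void mul(const Matrix& a, const Matrix& b, real scaleAB, real scaleT);
    };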