LCOV - code coverage report
Current view: top level - nntrainer/layers/loss - mse_loss_layer.cpp (source / functions)
Test:      coverage_filtered.info
Test Date: 2025-12-14 20:38:17
                 Coverage   Total   Hit
Lines:             80.0 %      40    32
Functions:        100.0 %       2     2
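Note: both functions are entered by the test suite (100 % function coverage), but 8 of the 40 instrumented lines are never executed. The zero-hit lines below (source lines 31-32, 62-63, 71-72, 80, and 91) all sit on the non-FP32 fallback and error paths, which is why line coverage is 32/40 = 80.0 %.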

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
       4              :  *
       5              :  * @file   mse_loss_layer.cpp
       6              :  * @date   24 June 2021
       7              :  * @brief  This is the MSE Loss Layer class of the neural network
       8              :  * @see    https://github.com/nnstreamer/nntrainer
       9              :  * @author Parichay Kapoor <pk.kapoor@samsung.com>
      10              :  * @bug    No known bugs except for NYI items
      11              :  *
      12              :  */
      13              : 
      14              : #include <layer_context.h>
      15              : #include <mse_loss_layer.h>
      16              : 
      17              : namespace nntrainer {
      18              : 
      19              : static constexpr size_t SINGLE_INOUT_IDX = 0;
      20              : 
      21         1033 : void MSELossLayer::forwarding(RunLayerContext &context, bool training) {
      22         1033 :   Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
      23              : 
      24         1033 :   Tensor empty_tensor;
      25         1033 :   Tensor &y = context.getInput(SINGLE_INOUT_IDX).getDataType() ==
      26              :                   ml::train::TensorDim::DataType::FP32
      27         1033 :                 ? context.getInput(SINGLE_INOUT_IDX)
      28              :                 : empty_tensor;
      29              : 
      30         1033 :   if (y.empty())
      31            0 :     y = context.getInput(SINGLE_INOUT_IDX)
      32            0 :           .clone(ml::train::TensorDim::DataType::FP32);
      33              : 
      34              :   // hidden_ <- y2 - y;
      35         1033 :   if (context.isLabelAvailable(SINGLE_INOUT_IDX)) {
      36          699 :     Tensor &y2 = context.getLabel(SINGLE_INOUT_IDX);
      37          699 :     y2.subtract(y, hidden_);
      38              : 
      39              :     /** calculate sum of squares normalized by size */
      40          699 :     float l2norm = hidden_.l2norm();
      41          699 :     l2norm *= l2norm / hidden_.size();
      42              : 
       43              :     /** wrap in a tensor to update the loss */
      44          699 :     Tensor l = Tensor(TensorDim(1, 1, 1, 1), &l2norm);
      45          699 :     LossLayer::updateLoss(context, l);
      46          699 :   }
      47              : 
      48              :   // fill the output
      49         1033 :   hidden_.fill(y);
      50         1033 : }
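For reference, forwarding() computes the standard mean squared error: hidden_ holds y2 - y, so l2norm()^2 / size() equals (1/N) * sum_i (y2_i - y_i)^2. A minimal standalone sketch of that equivalence, in plain C++ with made-up values and none of the nntrainer types:

  // Verify that l2norm(diff)^2 / N equals the directly computed MSE.
  #include <cassert>
  #include <cmath>
  #include <cstddef>
  #include <vector>

  int main() {
    std::vector<float> y  = {0.1f, 0.4f, 0.9f}; // prediction (illustrative)
    std::vector<float> y2 = {0.0f, 0.5f, 1.0f}; // label (illustrative)

    // diff = y2 - y, as in forwarding()
    float sum_sq = 0.0f;
    for (std::size_t i = 0; i < y.size(); ++i) {
      float d = y2[i] - y[i];
      sum_sq += d * d;
    }

    // the l2norm route used above: sqrt(sum_sq), squared, divided by size
    float l2norm = std::sqrt(sum_sq);
    float loss_via_l2norm = l2norm * l2norm / y.size();

    // the textbook route: mean of squared differences
    float loss_direct = sum_sq / y.size();

    assert(std::fabs(loss_via_l2norm - loss_direct) < 1e-6f);
    return 0;
  }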
      51              : 
      52          599 : void MSELossLayer::calcDerivative(RunLayerContext &context) {
      53          599 :   Tensor empty_tensor;
      54              : 
      55              :   Tensor &ret_derivative =
      56          599 :     context.getOutgoingDerivative(SINGLE_INOUT_IDX).getDataType() ==
      57              :         ml::train::TensorDim::DataType::FP32
      58          599 :       ? context.getOutgoingDerivative(SINGLE_INOUT_IDX)
      59              :       : empty_tensor;
      60              : 
      61          599 :   if (ret_derivative.empty())
      62            0 :     ret_derivative = context.getOutgoingDerivative(SINGLE_INOUT_IDX)
      63            0 :                        .clone(ml::train::TensorDim::DataType::FP32);
      64          599 :   Tensor empty_tensor1;
      65          599 :   Tensor &y = context.getInput(SINGLE_INOUT_IDX).getDataType() ==
      66              :                   ml::train::TensorDim::DataType::FP32
      67          599 :                 ? context.getInput(SINGLE_INOUT_IDX)
      68              :                 : empty_tensor1;
      69              : 
      70          599 :   if (y.empty())
      71            0 :     y = context.getInput(SINGLE_INOUT_IDX)
      72            0 :           .clone(ml::train::TensorDim::DataType::FP32);
      73              : 
      74          599 :   const Tensor &y2 = context.getIncomingDerivative(SINGLE_INOUT_IDX);
      75              : 
      76          599 :   y.subtract(y2, ret_derivative);
      77          599 :   float divider = ((float)y.size()) / 2;
      78          599 :   if (ret_derivative.divide_i(divider) != ML_ERROR_NONE) {
      79              :     throw std::runtime_error(
      80            0 :       "[MSELossLayer::calcDerivative] Error when calculating loss");
      81              :   }
      82              : 
       83              :   // Loss scale needs full precision of ret_derivative. Therefore,
       84              :   // ret_derivative should be FP32 while the scale is applied, and afterwards
       85              :   // it must be converted back to the original type for backpropagation.
      86              : 
      87          599 :   LossLayer::applyLossScale(context, ret_derivative);
      88              : 
      89          599 :   if (context.getOutgoingDerivative(SINGLE_INOUT_IDX).getDataType() !=
      90              :       ml::train::TensorDim::DataType::FP32)
      91            0 :     context.getOutgoingDerivative(SINGLE_INOUT_IDX).copyData(ret_derivative);
      92          599 : }
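calcDerivative() computes dL/dy = (y - y2) / (N/2) = 2 * (y - y2) / N, i.e. the gradient of the MSE above. A small sanity check of that analytic form against a central finite difference, again plain C++ with illustrative values rather than the nntrainer API:

  #include <cassert>
  #include <cmath>
  #include <cstddef>
  #include <vector>

  // L(y) = (1/N) * sum_i (y2_i - y_i)^2
  static float mse(const std::vector<float> &y, const std::vector<float> &y2) {
    float s = 0.0f;
    for (std::size_t i = 0; i < y.size(); ++i) {
      float d = y2[i] - y[i];
      s += d * d;
    }
    return s / y.size();
  }

  int main() {
    std::vector<float> y  = {0.2f, 0.7f};
    std::vector<float> y2 = {0.0f, 1.0f};
    const float N = static_cast<float>(y.size());
    const float eps = 1e-3f;

    for (std::size_t i = 0; i < y.size(); ++i) {
      // analytic gradient, as in calcDerivative(): (y - y2) / (N / 2)
      float analytic = (y[i] - y2[i]) / (N / 2.0f);

      // central finite difference of the loss w.r.t. y[i]
      std::vector<float> yp = y, ym = y;
      yp[i] += eps;
      ym[i] -= eps;
      float numeric = (mse(yp, y2) - mse(ym, y2)) / (2.0f * eps);

      assert(std::fabs(analytic - numeric) < 1e-2f);
    }
    return 0;
  }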
      93              : 
      94              : } // namespace nntrainer
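On the loss-scale comment in calcDerivative(): the usual motivation for applying the scale in full precision is that a small gradient can underflow when cast to half precision before scaling. A crude illustration of that effect; the FP16 flush-to-zero threshold is hand-coded here as a stand-in for a real half-precision cast:

  #include <cassert>
  #include <cmath>
  #include <cstdio>

  // Simulated FP16 cast: values below the smallest positive FP16
  // subnormal (~5.96e-8) flush to zero. Illustration only.
  static float to_half_sim(float x) {
    const float fp16_min_subnormal = 5.96e-8f;
    return (std::fabs(x) < fp16_min_subnormal) ? 0.0f : x;
  }

  int main() {
    const float grad = 1e-8f;         // a small upstream gradient
    const float loss_scale = 1024.0f;

    // cast first, then scale: the gradient has already underflowed
    float bad = to_half_sim(grad) * loss_scale;   // 0.0f

    // scale in FP32 first, then cast: the information survives
    float good = to_half_sim(grad * loss_scale);  // ~1.024e-5f

    assert(bad == 0.0f);
    assert(good > 0.0f);
    std::printf("cast-then-scale: %g, scale-then-cast: %g\n", bad, good);
    return 0;
  }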
        

Generated by: LCOV version 2.0-1