LCOV - code coverage report
Current view: top level - nntrainer/layers/loss - constant_derivative_loss_layer.cpp (source / functions)
Test:       coverage_filtered.info
Test Date:  2025-12-14 20:38:17
              Coverage   Total   Hit
Lines:        27.8 %     18      5
Functions:    66.7 %     6       4

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
       4              :  *
       5              :  * @file constant_derivative_loss_layer.cpp
       6              :  * @date 05 Oct 2021
       7              :  * @brief This file contains the constant derivative loss implementation
       8              :  * @note This is a special type of loss used to feed an arbitrary derivative
       9              :  * value to the last layer.
      10              :  * @see https://github.com/nnstreamer/nntrainer
      11              :  * @author Jihoon Lee <jhoon.it.lee@samsung.com>
      12              :  * @bug No known bugs except for NYI items
      13              :  */
      14              : 
      15              : #include <constant_derivative_loss_layer.h>
      16              : 
      17              : #include <layer_context.h>
      18              : 
      19              : namespace nntrainer {
      20              : 
      21              : static constexpr int SINGLE_INOUT_IDX = 0;
      22              : /// @todo make this property
      23              : static constexpr float value = 1.0f;
      24              : 
      25           18 : ConstantDerivativeLossLayer::ConstantDerivativeLossLayer() : LossLayer() {}
      26           36 : ConstantDerivativeLossLayer::~ConstantDerivativeLossLayer() {}
      27              : 
      28            0 : void ConstantDerivativeLossLayer::forwarding(RunLayerContext &context,
      29              :                                              bool training) {
      30            0 :   Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
      31            0 :   Tensor &y = context.getInput(SINGLE_INOUT_IDX);
      32              : 
      33              :   // fill the output
      34            0 :   hidden_.fill(y);
      35              : 
      36            0 :   if (context.isLabelAvailable(SINGLE_INOUT_IDX)) {
      37              :     Tensor l(1);
      38            0 :     l.setValue(value);
      39              :     // update the loss value
      40            0 :     LossLayer::updateLoss(context, l);
      41            0 :   }
      42            0 : }
      43              : 
      44           33 : void ConstantDerivativeLossLayer::setProperty(
      45              :   const std::vector<std::string> &values) {
      46              :   /// update set value
      47           33 :   LossLayer::setProperty(values);
      48           30 : }
      49              : 
      50            0 : void ConstantDerivativeLossLayer::calcDerivative(RunLayerContext &context) {
      51            0 :   Tensor &ret_derivative = context.getOutgoingDerivative(SINGLE_INOUT_IDX);
      52            0 :   ret_derivative.setValue(value);
      53            0 : }
      54              : 
      55              : } // namespace nntrainer
        

Generated by: LCOV version 2.0-1
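
The zero-hit lines above belong to forwarding() and calcDerivative(), while the constructor, destructor, and setProperty() are exercised by coverage_filtered.info. For context, the sketch below illustrates the concept the layer implements rather than the nntrainer API itself: the loss forwards its input unchanged and back-propagates a constant derivative, so an arbitrary gradient can be injected into the last trainable layer. All names in the sketch (ConstantDerivativeLossSketch, forward, backward) are illustrative, and the constant mirrors the file-level `value` constant.

// Minimal, framework-free sketch of a constant-derivative loss.
// This is NOT the nntrainer API; it only mirrors the behaviour of
// forwarding() (identity pass-through) and calcDerivative() (constant gradient).
#include <cstddef>
#include <vector>

struct ConstantDerivativeLossSketch {
  float value = 1.0f; // counterpart of the file-level `value` constant

  // forwarding(): the output is simply a copy of the input
  std::vector<float> forward(const std::vector<float> &input) const {
    return input;
  }

  // calcDerivative(): every element of the outgoing derivative is `value`,
  // regardless of the input, so the previous layer receives a fixed gradient
  std::vector<float> backward(std::size_t n) const {
    return std::vector<float>(n, value);
  }
};

Raising line coverage above 27.8 % would require a test that runs the forward and backward paths (forwarding() and calcDerivative()) rather than only constructing the layer and setting properties, which is consistent with the hit counts shown for this run.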