LCOV - code coverage report
Current view: top level - nntrainer/layers - addition_layer.cpp (source / functions)
Test:      coverage_filtered.info
Test Date: 2025-12-14 20:38:17

               Coverage    Total    Hit
Lines:         45.8 %      48       22
Functions:     66.7 %      6        4

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
       4              :  *
       5              :  * @file   addition_layer.cpp
       6              :  * @date   30 July 2020
       7              :  * @see    https://github.com/nnstreamer/nntrainer
       8              :  * @author Parichay Kapoor <pk.kapoor@samsung.com>
       9              :  * @bug    No known bugs except for NYI items
      10              :  * @brief  This is Addition Layer Class for Neural Network
      11              :  *
      12              :  */
      13              : 
      14              : #include <addition_layer.h>
      15              : #include <nntrainer_error.h>
      16              : #include <nntrainer_log.h>
      17              : #include <node_exporter.h>
      18              : #include <util_func.h>
      19              : 
      20              : #include <layer_context.h>
      21              : 
      22              : namespace nntrainer {
      23              : 
      24              : static constexpr size_t SINGLE_INOUT_IDX = 0;
      25              : 
      26          192 : void AdditionLayer::finalize(InitLayerContext &context) {
      27          192 :   context.setOutputDimensions({context.getInputDimensions()[0]});
      28          192 : }
      29              : 
      30          275 : void AdditionLayer::forwarding(RunLayerContext &context, bool training) {
      31          275 :   Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
      32              : 
      33              :   /** @todo check possibility for in-place of addition layer */
      34          861 :   for (unsigned int idx = 0; idx < context.getNumInputs(); ++idx) {
      35          586 :     const Tensor &input_ = context.getInput(idx);
      36          586 :     if (!idx) {
      37          275 :       hidden_.copy(input_);
      38              :     } else {
      39          311 :       hidden_.add_i(input_);
      40              :     }
      41              :   }
      42          275 : }
      43              : 
      44            0 : void AdditionLayer::incremental_forwarding(RunLayerContext &context,
      45              :                                            unsigned int from, unsigned int to,
      46              :                                            bool training) {
      47            0 :   Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
      48            0 :   TensorDim hidden_dim = hidden_.getDim();
      49            0 :   TensorDim hidden_step_dim = hidden_dim;
      50              : 
      51            0 :   hidden_step_dim.batch(1);
      52            0 :   hidden_step_dim.height(to - from);
      53              : 
      54            0 :   for (unsigned int b = 0; b < hidden_.batch(); ++b) {
      55              :     Tensor hidden_step = hidden_.getSharedDataTensor(
      56            0 :       hidden_step_dim, b * hidden_dim.getFeatureLen(), true);
      57              : 
      58              :     /** @todo check possibility for in-place of addition layer */
      59            0 :     for (unsigned int idx = 0; idx < context.getNumInputs(); ++idx) {
      60            0 :       const Tensor &input_ = context.getInput(idx);
      61            0 :       TensorDim input_dim = input_.getDim();
      62              : 
      63            0 :       TensorDim input_step_dim = input_dim;
      64            0 :       input_step_dim.batch(1);
      65            0 :       input_step_dim.height(to - from);
      66              : 
      67              :       Tensor input_step = input_.getSharedDataTensor(
      68            0 :         input_step_dim, b * input_dim.getFeatureLen(), true);
      69            0 :       if (!idx) {
      70            0 :         hidden_step.copy(input_step);
      71              :       } else {
      72            0 :         hidden_step.add_i(input_step);
      73              :       }
      74            0 :     }
      75            0 :   }
      76            0 : }
      77              : 
      78          138 : void AdditionLayer::calcDerivative(RunLayerContext &context) {
      79              : 
      80          432 :   for (unsigned int idx = 0; idx < context.getNumInputs(); ++idx) {
      81              :     /**
      82              :      * TODO: replace this with tensor assignment during optimization.
      83              :      * Tensor assignment needs to make sure that the previous connected layers
      84              :      * are not inplace
      85              :      */
      86          588 :     context.getOutgoingDerivative(idx).copy(
      87          588 :       context.getIncomingDerivative(SINGLE_INOUT_IDX));
      88              :   }
      89          138 : }
      90              : 
      91         1121 : void AdditionLayer::setProperty(const std::vector<std::string> &values) {
      92         1121 :   auto remain_props = loadProperties(values, add_props);
      93         1119 :   if (!remain_props.empty()) {
      94              :     std::string msg = "[AdditionLayer] Unknown Layer Properties count " +
      95            4 :                       std::to_string(values.size());
      96            8 :     throw exception::not_supported(msg);
      97              :   }
      98         1119 : }
      99              : 
     100            0 : void AdditionLayer::updateTensorsByInputDimensions(
     101              :   nntrainer::RunLayerContext &context,
     102              :   std::vector<nntrainer::TensorDim> input_dimensions) {
     103            0 :   for (size_t i = 0; i < context.getNumInputs(); ++i) {
     104            0 :     context.updateInput(i, input_dimensions[0]);
     105              :   }
     106            0 :   context.updateOutput(SINGLE_INOUT_IDX, input_dimensions[0]);
     107            0 : }
     108              : 
     109              : } /* namespace nntrainer */
        

Generated by: LCOV version 2.0-1
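
Note: incremental_forwarding (zero hits above) applies the same accumulation as the covered forwarding() path, restricted to the [from, to) height slice of each batch. Below is a minimal standalone sketch of that accumulation semantics, using plain std::vector in place of nntrainer::Tensor and arbitrary example sizes; it is an illustration only, not the library's API or its test harness.

    // Illustrative sketch: output = element-wise sum of all inputs,
    // mirroring AdditionLayer::forwarding() above. Input count and sizes
    // here are arbitrary assumptions for demonstration.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    std::vector<float>
    add_forward(const std::vector<std::vector<float>> &inputs) {
      assert(!inputs.empty());
      // idx == 0: copy into the output, like hidden_.copy(input_)
      std::vector<float> hidden = inputs[0];
      for (std::size_t idx = 1; idx < inputs.size(); ++idx) {
        assert(inputs[idx].size() == hidden.size());
        // idx > 0: accumulate in place, like hidden_.add_i(input_)
        for (std::size_t i = 0; i < hidden.size(); ++i)
          hidden[i] += inputs[idx][i];
      }
      return hidden;
    }

    int main() {
      std::vector<std::vector<float>> inputs = {
        {1.f, 2.f}, {3.f, 4.f}, {5.f, 6.f}};
      std::vector<float> out = add_forward(inputs); // expected {9, 12}
      return (out[0] == 9.f && out[1] == 12.f) ? 0 : 1;
    }

Copying the first input instead of adding it means the output buffer never needs zero-initialisation, which matches the idx == 0 branch in forwarding() and incremental_forwarding().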