LCOV - code coverage report
Current view: top level - nntrainer/tensor - var_grad.cpp (source / functions)
Test:         coverage_filtered.info
Test Date:    2025-12-14 20:38:17

              Coverage    Total    Hit
  Lines:      71.4 %         28     20
  Functions:  50.0 %          4      2

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
       4              :  *
       5              :  * @file   var_grad.cpp
       6              :  * @date   13 November 2020
       7              :  * @see    https://github.com/nnstreamer/nntrainer
       8              :  * @author Parichay Kapoor <pk.kapoor@samsung.com>
       9              :  * @bug    No known bugs except for NYI items
       10              :  * @brief  This is the Var_Grad class for neural networks
      11              :  *
      12              :  */
      13              : 
      14              : #include <util_func.h>
      15              : #include <var_grad.h>
      16              : 
      17              : #include <nntrainer_error.h>
      18              : 
      19              : namespace nntrainer {
      20              : 
      21          405 : Var_Grad::Var_Grad(const TensorDim &dim, const Initializer init,
      22              :                    bool need_gradient, bool alloc_now,
      23          405 :                    const std::string &name) :
      24          405 :   is_dependent(false),
      25          405 :   is_first_access_gradient(false),
      26          405 :   is_last_access_gradient(false) {
      27            0 :   var = std::make_shared<Tensor>(dim, alloc_now, init, name);
      28              : 
      29          405 :   std::string grad_name = name + grad_suffix;
      30          405 :   if (need_gradient)
      31              :     /**
       32              :      * @todo the gradient initializer should be none, and the gradient
       33              :      * should be zeroed by the user right before use.
      34              :      */
      35              :     grad =
      36          642 :       std::make_shared<Tensor>(dim, alloc_now, Initializer::ZEROS, grad_name);
      37              :   else
      38           84 :     grad = std::make_shared<Tensor>(grad_name);
      39          405 : }
      40              : 
      41          258 : Var_Grad::Var_Grad(const TensorDim &dim_v, const TensorDim &dim_g,
      42              :                    const Initializer init, bool need_gradient, bool alloc_now,
      43          258 :                    const std::string &name) :
      44          258 :   is_dependent(false),
      45          258 :   is_first_access_gradient(false),
      46          258 :   is_last_access_gradient(false) {
      47            0 :   var = std::make_shared<Tensor>(dim_v, alloc_now, init, name);
      48              : 
      49          258 :   std::string grad_name = name + grad_suffix;
      50          258 :   if (need_gradient)
      51              :     /**
       52              :      * @todo the gradient initializer should be none, and the gradient
       53              :      * should be zeroed by the user right before use.
      54              :      */
      55              : 
      56              :     grad =
      57          484 :       std::make_shared<Tensor>(dim_g, alloc_now, Initializer::ZEROS, grad_name);
      58              :   else
      59           16 :     grad = std::make_shared<Tensor>(grad_name);
      60          258 : }
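
Note: the two constructors above differ only in whether the gradient tensor
gets its own shape (dim_g) or reuses the variable's (dim). Below is a minimal
usage sketch of the need_gradient branch; it assumes the public nntrainer
headers, a TensorDim(batch, channel, height, width) constructor, and the
tensor names shown are hypothetical:

    // Sketch only, not part of the measured source above.
    #include <tensor_dim.h>   // assumed header for TensorDim
    #include <var_grad.h>

    using namespace nntrainer;

    void construct_examples() {
      TensorDim dim(1, 1, 4, 4); // (batch, channel, height, width) assumed

      // need_gradient = true: the gradient tensor (name + grad_suffix)
      // is allocated immediately and zero-initialized.
      Var_Grad trainable(dim, Initializer::ZEROS,
                         /*need_gradient=*/true, /*alloc_now=*/true,
                         "weight");

      // need_gradient = false: the gradient is only a named, empty Tensor.
      Var_Grad frozen(dim, Initializer::ZEROS,
                      /*need_gradient=*/false, /*alloc_now=*/true,
                      "frozen_weight");
    }
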
      61              : 
      62            0 : void Var_Grad::initializeVariable(const Tensor &preallocated) {
      63              :   /**
       64              :    * Creating a new tensor object here is intentional: the tensor itself
       65              :    * is not shared with other layers, but its internal memory is.
      66              :    */
      67            0 :   var = std::make_shared<Tensor>(preallocated);
       68              :   /** tensor memory is intentionally left uninitialized for shared tensors */
      69            0 : }
      70              : 
      71            0 : void Var_Grad::initializeGradient(const Tensor &preallocated) {
      72              :   /**
       73              :    * Creating a new tensor object here is intentional: the tensor itself
       74              :    * is not shared with other layers, but its internal memory is.
      75              :    */
      76            0 :   grad = std::make_shared<Tensor>(preallocated);
       77              :   /** tensor memory is intentionally left uninitialized for shared tensors */
      78            0 : }
      79              : 
      80              : } // namespace nntrainer
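
Note: initializeVariable and initializeGradient are the two uncovered
functions in this report (0 hits). Each wraps a preallocated tensor in a
fresh Tensor object that shares the underlying memory, which is why that
memory is deliberately left uninitialized. A hedged sketch of how they might
be exercised follows; it assumes these functions are publicly callable, that
Tensor(dim, alloc_now, init, name) matches the constructor used above, that
Initializer::NONE exists (suggested by the @todo comments), and that copying
a Tensor shares its buffer, as the source comments state:

    // Sketch only, not part of the measured source above.
    #include <tensor.h>       // assumed header for Tensor
    #include <var_grad.h>

    using namespace nntrainer;

    void share_preallocated(Var_Grad &vg, const TensorDim &dim) {
      // Preallocated buffers, e.g. owned by a memory planner elsewhere.
      Tensor pre_var(dim, true, Initializer::NONE, "pre_var");
      Tensor pre_grad(dim, true, Initializer::NONE, "pre_grad");

      // Each call makes a new Tensor object around the preallocated one;
      // the object is new, but the memory it refers to is shared.
      vg.initializeVariable(pre_var);
      vg.initializeGradient(pre_grad);
    }
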
        

Generated by: LCOV version 2.0-1