LCOV - code coverage report
Current view: top level - nntrainer/tensor - tensor_wrap_specs.h (source / functions)
Test: coverage_filtered.info
Test Date: 2025-12-14 20:38:17
Coverage: Lines: 100.0 % (8 of 8) | Functions: 100.0 % (2 of 2)

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
       4              :  *
       5              :  * @file   tensor_wrap_specs.h
       6              :  * @date   26 July 2021
       7              :  * @see    https://github.com/nnstreamer/nntrainer
       8              :  * @author Parichay Kapoor <pk.kapoor@samsung.com>
       9              :  * @bug    No known bugs except for NYI items
       10              :  * @brief  This file contains specs for various tensor wrappers
      11              :  *
      12              :  */
      13              : 
      14              : #ifndef __TENSOR_WRAP_SPECS_H__
      15              : #define __TENSOR_WRAP_SPECS_H__
      16              : 
      17              : #include <memory>
      18              : #include <tuple>
      19              : 
      20              : #include <common.h>
      21              : #include <tensor.h>
      22              : 
      23              : namespace nntrainer {
      24              : 
      25              : /**
      26              :  * @brief     Enumeration of Weight Regularizer
      27              :  * @todo      Update to TensorRegularizer
      28              :  */
      29              : enum class WeightRegularizer {
      30              :   L2NORM, /**< L2 norm regularization */
      31              :   NONE,   /**< no regularization */
      32              :   UNKNOWN /**< Unknown */
      33              : };
      34              : 
      35              : /**
      36              :  * @brief define the lifespan of the given tensor to reduce peak memory
      37              :  *
      38              :  */
      39              : enum class TensorLifespan {
      40              :   UNMANAGED = 0b000, /**< tensor with no lifespan, will not be allocated */
       41              :   FORWARD_FUNC_LIFESPAN = 0b001, /**< tensor must not be reset during the
       42              :                            forward function call, e.g. temporary tensors
       43              :                            needed during forward operations */
      44              :   CALC_DERIV_LIFESPAN = 0b010,   /**< must be valid during calcDerivative() */
      45              :   CALC_GRAD_LIFESPAN = 0b100, /**< tensor must be valid during calcGradient() */
       46              :   CALC_AGRAD_LIFESPAN =
       47              :     0b1000, /**< tensor must be valid during the apply-gradient step */
       48              :   CALC_GRAD_DERIV_LIFESPAN = 0b110, /**< tensor must not be reset during
       49              :                              the calc_grad and calc_deriv calls, e.g. temporary
       50              :                              tensors needed during backward operations */
       51              :   CALC_GRAD_DERIV_AGRAD_LIFESPAN =
       52              :     0b1110,                      /**< tensor must not be reset during
       53              :                    the calc_grad, calc_deriv and apply-gradient calls,
       54              :                    e.g. temporary tensors needed during backward operations */
      55              :   FORWARD_GRAD_LIFESPAN = 0b101, /**< Forward + grad lifespan */
      56              :   FORWARD_GRAD_AGRAD_LIFESPAN =
      57              :     0b1101, /**< Forward + grad + apply gradient lifespan */
      58              :   FORWARD_DERIV_LIFESPAN = 0b011, /**< Forward + deriv lifespan */
      59              :   BACKWARD_FUNC_LIFESPAN =
      60              :     CALC_GRAD_DERIV_AGRAD_LIFESPAN, /**< Alias of CALC_GRAD_DERIV_AGRAD_LIFESPAN
      61              :                                      */
       62              :   ITERATION_LIFESPAN = 0b1111, /**< tensor must not be reset until the owning
       63              :                         layer finishes its execution in the current
       64              :                         iteration, e.g. hidden memory/cells of RNN */
       65              :   EPOCH_LIFESPAN = 0b11111, /**< tensor must remain valid until the epoch ends */
       66              :   FORWARD_INFER_LIFESPAN =
       67              :     0b100000,               /**< tensor is used only for inference */
       68              :   MAX_LIFESPAN = 0b1111111, /**< tensor must not be reset until the end of the
       69              :                   model execution, e.g. layer weights */
      70              :   VIRTUAL = 0b11111111,     /**< virtual lifespan, tensor exists but does not
      71              :                                allocate memory */
      72              : };
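
The composite lifespans above are bitwise unions of the single-phase flags
(e.g. CALC_GRAD_DERIV_LIFESPAN = 0b110 = CALC_GRAD_LIFESPAN |
CALC_DERIV_LIFESPAN), so liveness in a given phase can be tested with one AND
over the underlying integer. A minimal sketch, assuming this header is on the
include path; alive_during is an illustrative helper, not part of nntrainer:

    #include <type_traits>
    #include <tensor_wrap_specs.h>

    // true when the tensor's lifespan covers the given execution phase
    constexpr bool alive_during(nntrainer::TensorLifespan ls,
                                nntrainer::TensorLifespan phase) {
      using U = std::underlying_type_t<nntrainer::TensorLifespan>;
      return (static_cast<U>(ls) & static_cast<U>(phase)) != 0;
    }

    static_assert(
      alive_during(nntrainer::TensorLifespan::CALC_GRAD_DERIV_LIFESPAN,
                   nntrainer::TensorLifespan::CALC_DERIV_LIFESPAN),
      "0b110 covers the calcDerivative() phase (0b010)");
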
      73              : 
      74              : /**
      75              :  * @brief Specification of the Weight as a tensor wrapper
      76              :  *
       77              :  * @details The tuple values are variable dimension, gradient dimension,
       78              :  * initializer, regularizer, regularizer constant, decay, clip-by-global-norm
       79              :  * constant, need_gradient property, name, output axis, loss scale factor,
       80              :  * is_mixed, and is_virtual.
      81              :  *
       82              :  * @note a virtual tensor doesn't allocate any memory but creates an empty tensor
      83              :  */
      84              : typedef std::tuple<TensorDim, TensorDim, Initializer, WeightRegularizer, float,
      85              :                    float, float, bool, const std::string, unsigned int, float,
      86              :                    bool, bool>
      87              :   WeightSpec;
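
Since WeightSpec is a plain std::tuple, fields are read by position with
std::get. A hypothetical accessor, with indices following the declaration
order above (dump_weight_spec is illustrative, not part of nntrainer):

    #include <iostream>
    #include <tuple>
    #include <tensor_wrap_specs.h>

    // print a few WeightSpec fields; std::get indices follow the tuple order
    void dump_weight_spec(const nntrainer::WeightSpec &spec) {
      std::cout << "name: " << std::get<8>(spec)            // name
                << ", need_gradient: " << std::get<7>(spec) // need_gradient
                << ", decay: " << std::get<5>(spec)         // decay constant
                << '\n';
    }
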
      88              : 
      89              : /**
      90              :  * @brief Specification of the Var_Grad (trainable tensor) as a tensor wrapper
      91              :  *
       92              :  * @details The tuple values are dimension, initializer, need_gradient property,
       93              :  * name, lifespan, and compute engine of the Var_Grad object.
      94              :  */
      95              : typedef std::tuple<TensorDim, Initializer, bool, const std::string,
      96              :                    TensorLifespan, ml::train::LayerComputeEngine>
      97              :   VarGradSpec;
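
A hypothetical construction of a VarGradSpec. The four-argument (batch,
channel, height, width) TensorDim constructor and the
ml::train::LayerComputeEngine::CPU enumerator are assumptions about the
surrounding nntrainer API, not verified by this header:

    #include <tensor_wrap_specs.h>

    nntrainer::VarGradSpec make_fc_weight_spec() {
      nntrainer::VarGradSpec spec{
        nntrainer::TensorDim{1, 1, 64, 64},      // dimension (assumed b,c,h,w ctor)
        nntrainer::Initializer::NONE,            // initializer
        true,                                    // need_gradient
        "fc_weight",                             // name
        nntrainer::TensorLifespan::MAX_LIFESPAN, // lifespan
        ml::train::LayerComputeEngine::CPU       // compute engine (assumed)
      };
      return spec;
    }
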
      98              : 
      99              : /**
     100              :  * @brief Tensor Specification which describes how this tensor should be
     101              :  * allocated and managed
     102              :  *
     103              :  */
     104              : struct TensorSpecV2 {
     105              : 
     106              :   /**
      107              :    * @brief Tensor is being managed by nntrainer; this enum defines how the
      108              :    * value should be recognized inside the nntrainer tensor managing scheme.
     109              :    *
     110              :    */
     111              :   enum class RequestType {
      112              :     PLACEHOLDER, /**< Placeholder defines that nntrainer should never care about
      113              :                     the memory inside the particular tensor */
      114              :     UNIQUE, /**< Unique means a simple tensor that will be owned explicitly by
      115              :                the current request */
      116              :     READ_ONLY_VIEW, /**< Readonly view defines a view whose @a underlying
      117              :                        memory is owned by another tensor, also hinting to
      118              :                        nntrainer that operations upon this particular tensor
      119              :                        will never change the value of the underlying memory */
      120              :     MAYBE_MODIFYING_VIEW, /**< Maybe modifying view defines a (possible) view
      121              :                        whose @a underlying memory is owned by another tensor,
      122              :                        while hinting to nntrainer that this tensor will do
      123              :                        some modification of the underlying memory. nntrainer
      124              :                        will try to make this particular tensor a view of the
      125              :                        stated reference. If making a view of the reference is
      126              :                        likely to break data integrity, nntrainer will request
      127              :                        an independent memory slot; in that case, it is the
      128              :                        user's responsibility to copy the data. */
      129              :     SHARED, /**< Shared defines shared tensor ownership for the given
      130              :                identifier; it is the user's responsibility to guarantee that
      131              :                the dimension and initializer of the shared tensor are exactly
      132              :                the same, as the user will be agnostic about when and by whom
      133              :                the tensor is actually requested. */
     134              :   };
     135              : 
     136              :   RequestType request_type = RequestType::UNIQUE; /**< Type of request */
     137              :   std::string name;                               /**< Identifier */
     138              :   TensorDim dim;                                  /**< dimension */
     139              :   TensorLifespan ls;                              /**< lifespan */
     140              :   Initializer initializer = Initializer::NONE;    /**< initializer */
     141              : 
     142              :   /** ONLY USED FOR READ_ONLY_VIEW, MAYBE_MODIFYING_VIEW */
     143              :   unsigned int offset = 0u;   /**< tensor offset */
     144              :   std::string reference_name; /**< reference name */
     145              : 
     146              :   /** ONLY FOR THE GRANULAR CONTROL OF LIFE OUTSIDE OF LAYER NODE */
     147              :   /// @todo make this as an opaque information with PIMPL
     148              :   std::vector<unsigned> additional_exec_order = {};
     149              : };
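
As a usage sketch, a READ_ONLY_VIEW request only needs the reference fields
filled in. The names and dimensions below are illustrative, and the
four-argument TensorDim constructor is an assumption:

    #include <tensor_wrap_specs.h>

    nntrainer::TensorSpecV2 make_readonly_view_spec() {
      nntrainer::TensorSpecV2 spec;
      spec.request_type = nntrainer::TensorSpecV2::RequestType::READ_ONLY_VIEW;
      spec.name = "attention_score:view";          // identifier of this view
      spec.dim = nntrainer::TensorDim{1, 1, 4, 4}; // assumed (b,c,h,w) ctor
      spec.ls = nntrainer::TensorLifespan::FORWARD_FUNC_LIFESPAN;
      spec.reference_name = "attention_score";     // tensor that owns the memory
      spec.offset = 0u;                            // view starts at the owner's base
      return spec;
    }
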
     150              : 
     151              : /**
     152              :  * @brief variable + gradient specification
     153              :  *
     154              :  */
     155        28530 : struct VarGradSpecV2 {
     156              : 
     157              :   /**
     158              :    * @brief Construct a new Var Grad Spec V2 object
     159              :    *
     160              :    */
     161         5796 :   VarGradSpecV2() = default;
     162              : 
     163              :   /**
     164              :    * @brief Copy construct
     165              :    *
     166              :    * @param rhs
     167              :    */
     168        10897 :   VarGradSpecV2(const VarGradSpecV2 &rhs) :
     169        10897 :     variable_spec(rhs.variable_spec),
     170        10620 :     gradient_spec(rhs.gradient_spec
     171        10897 :                     ? std::make_unique<TensorSpecV2>(*rhs.gradient_spec)
     172        10897 :                     : nullptr) {}
     173              : 
     174              :   /**
     175              :    * @brief copy assignment
     176              :    *
     177              :    * @param rhs
     178              :    * @return VarGradSpecV2&
     179              :    */
     180              :   VarGradSpecV2 &operator=(const VarGradSpecV2 &rhs) {
     181              :     variable_spec = rhs.variable_spec;
     182              :     gradient_spec = rhs.gradient_spec
     183              :                       ? std::make_unique<TensorSpecV2>(*rhs.gradient_spec)
     184              :                       : nullptr;
     185              :     return *this;
     186              :   }
     187              : 
     188              :   /**
     189              :    * @brief Move Construct
     190              :    *
     191              :    */
     192        11837 :   VarGradSpecV2(VarGradSpecV2 &&) noexcept = default;
     193              :   VarGradSpecV2 &operator=(VarGradSpecV2 &&) noexcept = default;
     194              : 
     195              :   TensorSpecV2 variable_spec; /**< variable spec */
     196              :   std::unique_ptr<TensorSpecV2> gradient_spec =
      197              :     nullptr; /**< gradient spec, if null it cannot be trained */
     198              : };
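
The hand-written copy operations above exist because gradient_spec is a
std::unique_ptr, which is not copyable; they clone the pointee so every copy
owns an independent gradient spec. A small sketch of the resulting behavior
(names are illustrative):

    #include <memory>
    #include <tensor_wrap_specs.h>

    void deep_copy_demo() {
      nntrainer::VarGradSpecV2 a;
      a.variable_spec.name = "proj_out";
      a.gradient_spec = std::make_unique<nntrainer::TensorSpecV2>();
      a.gradient_spec->name = "proj_out:grad";

      nntrainer::VarGradSpecV2 b(a);             // copy ctor clones *a.gradient_spec
      b.gradient_spec->name = "proj_out_b:grad"; // a.gradient_spec is untouched
    }
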
     199              : 
     200              : /**
     201              :  * @brief weight specification
     202              :  *
     203              :  */
     204              : struct WeightSpecV2 {
      205              :   VarGradSpecV2 vg_spec; /**< variable + gradient specification */
     206              :   WeightRegularizer regularizer = WeightRegularizer::NONE; /**< regularizer */
     207              :   float regularizer_constant = 0.0f; /**< regularizer constant */
     208              :   float decay = 0.0f;                /**< decay constant */
     209              :   float clip_by_global_norm = 0.0f;  /**< clip the gradient by norm */
     210              : };
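
A hypothetical weight request combining the pieces above: the variable lives
for the whole model run while L2 regularization and gradient clipping are
configured on the wrapper. All names and constants are illustrative:

    #include <tensor_wrap_specs.h>

    nntrainer::WeightSpecV2 make_conv_filter_spec() {
      nntrainer::WeightSpecV2 w;
      w.vg_spec.variable_spec.name = "conv_filter";
      w.vg_spec.variable_spec.ls = nntrainer::TensorLifespan::MAX_LIFESPAN;
      w.regularizer = nntrainer::WeightRegularizer::L2NORM;
      w.regularizer_constant = 1.0e-4f; // lambda for the L2 penalty
      w.decay = 0.0f;                   // no weight decay
      w.clip_by_global_norm = 5.0f;     // clip gradients above this norm
      return w;
    }
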
     211              : 
     212              : } // namespace nntrainer
     213              : 
     214              : #endif /** __TENSOR_WRAP_SPECS_H__ */
        

Generated by: LCOV version 2.0-1