LCOV - code coverage report
Current view: top level - api/ccapi/include - tensor_dim.h (source / functions)
Test: coverage_filtered.info
Test Date: 2025-12-12 20:39:18
Coverage: Lines: 100.0 % (13 of 13 hit) | Functions: - (0 of 0 hit)

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
       4              :  */
       5              : /**
       6              :  * @file   tensor_dim.h
       7              :  * @date   22 May 2020
       8              :  * @brief  This is Tensor Dimension Class
       9              :  * @see    https://github.com/nnstreamer/nntrainer
      10              :  * @author Jijoong Moon <jijoong.moon@samsung.com>
      11              :  * @bug    No known bugs except for NYI items
      12              :  *
      13              :  */
      14              : 
      15              : #ifndef __TENSOR_DIM_H__
      16              : #define __TENSOR_DIM_H__
      17              : #ifdef __cplusplus
      18              : 
      19              : #include <array>
      20              : #include <iosfwd>
      21              : 
      22              : #include <bitset>
      23              : #include <vector>
      24              : 
      25              : #ifdef ENABLE_FP16
      26              : #ifdef USE__FP16
      27              : #define _FP16 __fp16
      28              : #else
      29              : #define _FP16 _Float16
      30              : #endif
      31              : #endif
      32              : 
      33              : namespace ml {
      34              : namespace train {
      35              : 
      36              : /**
      37              :  * @brief Tensor Dimension. This class is used to save dimension information
      38              :  *
      39              :  */
      40              : class TensorDim {
      41              : public:
      42              :   static constexpr const size_t MAXDIM = 4;
      43              : 
      44              :   /**
      45              :    * @brief Tensor Format. Channel Last or Channel First
      46              :    *
      47              :    */
      48              :   enum class Format { NCHW, NHWC };
      49              : 
      50              :   /**
      51              :    * @brief Tensor Data Type.
       52              :    * Currently supports QINT4, QINT8, QINT16, BCQ, Q4_K, Q6_K, Q4_0, UINT4,
       53              :    * UINT8, UINT16, UINT32, FP16 & FP32
      54              :    */
      55              :   enum class DataType {
      56              :     QINT4,  /** quantized int 4*/
      57              :     QINT8,  /** quantized int 8*/
      58              :     QINT16, /** quantized int 16*/
      59              :     BCQ,    /** binary-code-based quantized*/
      60              :     Q4_K,   /** Q4_K quantized*/
      61              :     Q6_K,   /** q6 k quantized */
      62              :     Q4_0,   /** Q4_0 k quantized */
      63              :     UINT4,  /** quantized unsigned int 4*/
      64              :     UINT8,  /** unsigned int 8 bit */
      65              :     UINT16, /** unsigned int 16 bit */
      66              :     UINT32, /** unsigned int 32 bit */
      67              :     FP16,   /** half precision */
      68              :     FP32    /** single precision */
      69              :   };
      70              : 
      71              :   /**
      72              :    * @brief Tensor Data Storage Order. Row-major or Column-major
      73              :    *
      74              :    */
      75              :   enum class StorageOrder { ROW_MAJOR, COL_MAJOR };
      76              : 
      77              :   /**
       78              :    * @brief Tensor Type: the context holding the Format, DataType & StorageOrder
      79              :    *
      80              :    */
      81              :   struct TensorType {
      82              :     /**
      83              :      * @brief     Tensor Format : Default is NCHW
      84              :      */
      85              :     Format format;
      86              : 
      87              :     /**
      88              :      * @brief     Tensor Data Type : Default is FP32
      89              :      */
      90              :     DataType data_type;
      91              : 
      92              :     /**
      93              :      * @brief     Data Storage Order : Default is Row-major
      94              :      */
      95              :     StorageOrder storage_order;
      96              : 
      97              :     /**
      98              :      * @brief     Default creator of Tensor Type
      99              :      */
     100       608267 :     TensorType() :
     101       608292 :       format(Format::NCHW),
     102       608292 :       data_type(DataType::FP32),
     103       608171 :       storage_order(StorageOrder::ROW_MAJOR){};
     104              : 
     105              :     /**
      106              :      * @brief     Creator of Tensor Type with Format & DataType
     107              :      */
     108              :     TensorType(Format fm, DataType d_type,
     109       739661 :                StorageOrder order = StorageOrder::ROW_MAJOR) :
     110       728838 :       format(fm), data_type(d_type), storage_order(order){};
     111              :   };
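A minimal usage sketch of the two TensorType constructors (not part of this header; it assumes the header above is included and uses only the declarations shown):

    using ml::train::TensorDim;

    // Default-constructed type: NCHW, FP32, row-major, per the member comments above.
    TensorDim::TensorType default_type;

    // Explicit type: NHWC layout with FP16 data, row-major storage by default.
    TensorDim::TensorType half_type(TensorDim::Format::NHWC,
                                    TensorDim::DataType::FP16);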
     112              : 
     113              :   /**
     114              :    * @brief Get the Num Dim object
     115              :    *
     116              :    * @return unsigned int fixed value of MAXDIM
     117              :    */
     118              :   static unsigned int getNumDim();
     119              : 
     120              :   /**
     121              :    * @brief     Creator of TensorDim with Format & DataType
     122              :    *
       123              :    * @param fm format NCHW | NHWC
     124              :    * @param d_type DataType QINT4 | QINT8 | QINT16 | BCQ | UINT8 | UINT16 |
     125              :    * UINT32 | FP16 | FP32
     126              :    * @param eff_dim_flag_ effective dimension flag (1 means it's effective)
     127              :    * @param dyn_dim_flag_ dynamic dimension flag (1 means it's unspecified)
     128              :    */
     129              :   TensorDim(TensorDim::Format fm, TensorDim::DataType d_type,
     130              :             const std::bitset<MAXDIM> &eff_dim_flag_ = 0b1111,
     131              :             const std::bitset<MAXDIM> &dyn_dim_flag_ = 0b0000);
     132              : 
     133              :   /**
     134              :    * @brief Construct a new Tensor Dim object
     135              :    *
     136              :    * @param t_type_ tensor type
     137              :    * @param eff_dim_flag_ effective dimension flag (1 means it's effective)
     138              :    * @param dyn_dim_flag_ dynamic dimension flag (1 means it's unspecified)
     139              :    */
     140              :   explicit TensorDim(TensorType t_type_ = TensorType(),
     141              :                      const std::bitset<MAXDIM> &eff_dim_flag_ = 0b1111,
     142              :                      const std::bitset<MAXDIM> &dyn_dim_flag_ = 0b0000);
     143              : 
     144              :   /**
     145              :    * @brief Construct a new Tensor Dim object
     146              :    *
       147              :    * @param dims std::initializer_list
     148              :    * @param t_type_ tensor type
     149              :    *
     150              :    * formats of {w}, {h, w}, {c, h, w}, {b, c, h, w} for the NCHW & NHWC are
     151              :    * accepted
     152              :    */
     153              :   TensorDim(std::initializer_list<size_t> dims,
     154              :             TensorType t_type_ = TensorType());
     155              : 
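A rough sketch of the accepted initializer-list shapes, assuming (as is the usual convention for this constructor) that omitted leading axes default to 1; the values are illustrative only:

    using ml::train::TensorDim;

    TensorDim d1({10});           // {w}          -> 1:1:1:10
    TensorDim d2({5, 10});        // {h, w}       -> 1:1:5:10
    TensorDim d3({3, 5, 10});     // {c, h, w}    -> 1:3:5:10
    TensorDim d4({2, 3, 5, 10});  // {b, c, h, w} -> 2:3:5:10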
     156              :   /**
     157              :    * @brief Construct a new Tensor Dim object without batch dimension
     158              :    *
     159              :    * @param shapes shapes without batch dimension
     160              :    * @param t_type_ tensor type
     161              :    */
     162              :   TensorDim(const std::array<size_t, 3> &shapes,
     163              :             TensorType t_type_ = TensorType());
     164              : 
     165              :   /**
     166              :    * @brief Construct a new Tensor Dim object
     167              :    *
     168              :    * @param b batch
     169              :    * @param c channel
     170              :    * @param h height
     171              :    * @param w width
       172              :    * @param t_type_ tensor type: format NCHW | NHWC, dataType FP32 | FP16
       173              :    * @param eff_dim_flag_ dimension bit flag to calculate the effective
       174              :    * dimension, rightmost is width
     175              :    */
     176              :   TensorDim(size_t b, size_t c, size_t h, size_t w,
     177              :             TensorType t_type_ = TensorType(),
     178              :             const std::bitset<MAXDIM> &eff_dim_flag_ = 0b1111,
     179              :             const std::bitset<MAXDIM> &dyn_dim_flag_ = 0b0000);
     180              : 
     181              :   /**
     182              :    * @brief Construct a new Tensor Dim object
     183              :    *
     184              :    * @param c channel
     185              :    * @param h height
     186              :    * @param w width
       187              :    * @param t_type_ tensor type: format NCHW | NHWC, dataType FP32 | FP16
       188              :    * @param eff_dim_flag_ dimension bit flag to calculate the effective
       189              :    * dimension, rightmost is width
     190              :    */
     191              :   TensorDim(size_t c, size_t h, size_t w, TensorType t_type_ = TensorType(),
     192              :             const std::bitset<MAXDIM> &eff_dim_flag_ = 0b1111,
     193              :             const std::bitset<MAXDIM> &dyn_dim_flag_ = 0b0000);
     194              : 
     195              :   /**
     196              :    * @brief Construct a new Tensor Dim object
     197              :    *
     198              :    * @param h height
     199              :    * @param w width
       200              :    * @param t_type_ tensor type: format NCHW | NHWC, dataType FP32 | FP16
       201              :    * @param eff_dim_flag_ dimension bit flag to calculate the effective
       202              :    * dimension, rightmost is width
     203              :    */
     204              :   TensorDim(size_t h, size_t w, TensorType t_type_ = TensorType(),
     205              :             const std::bitset<MAXDIM> &eff_dim_flag_ = 0b1111,
     206              :             const std::bitset<MAXDIM> &dyn_dim_flag_ = 0b0000);
     207              : 
     208              :   /**
     209              :    * @brief Construct a new Tensor Dim object
     210              :    *
     211              :    * @param w width
       212              :    * @param t_type_ tensor type: format NCHW | NHWC, dataType FP32 | FP16
       213              :    * @param eff_dim_flag_ dimension bit flag to calculate the effective
       214              :    * dimension, rightmost is width
     215              :    */
     216              :   TensorDim(size_t w, TensorType t_type_ = TensorType(),
     217              :             const std::bitset<MAXDIM> &eff_dim_flag_ = 0b1111,
     218              :             const std::bitset<MAXDIM> &dyn_dim_flag_ = 0b0000);
     219              : 
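For illustration, a 1x3x224x224 dimension could be built with any of the overloads above; a sketch using the default (NCHW, FP32) tensor type and an explicit FP16 variant:

    using ml::train::TensorDim;

    TensorDim dim(1, 3, 224, 224);  // b, c, h, w
    TensorDim fp16_dim(1, 3, 224, 224,
                       TensorDim::TensorType(TensorDim::Format::NCHW,
                                             TensorDim::DataType::FP16));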
     220              :   /**
     221              :    * @brief Construct a new Tensor Dim object
     222              :    *
       223              :    * @param d0 batch
       224              :    * @param d1 channel
       225              :    * @param d2 height
       226              :    * @param d3 width
       227              :    * @param fm format NCHW | NHWC
     228              :    * @param d_type DataType QINT4 | QINT8 | QINT16 | BCQ | UINT8 | UINT16 |
     229              :    * UINT32 | FP16 | FP32
       230              :    * @param eff_dim_flag_ dimension bit flag to calculate the effective
       231              :    * dimension, rightmost is width
     232              :    */
     233              :   TensorDim(size_t d0, size_t d1, size_t d2, size_t d3, TensorDim::Format fm,
     234              :             TensorDim::DataType d_type,
     235              :             const std::bitset<MAXDIM> &eff_dim_flag_ = 0b1111,
     236              :             const std::bitset<MAXDIM> &dyn_dim_flag_ = 0b0000);
     237              : 
     238              :   /**
     239              :    * @brief Copy construct a new tensor dim
     240              :    *
     241              :    * @param rhs tensor dim to copy from
     242              :    */
     243              :   TensorDim(const TensorDim &rhs) = default;
     244              : 
     245              :   /**
     246              :    * @brief Construct a new Tensor Dim object
     247              :    *
       248              :    * @param shape shape string describing the dimension
     249              :    * @param t_type_ Tensor Type
     250              :    */
     251              :   TensorDim(const std::string &shape, TensorType t_type_ = TensorType());
     252              : 
     253              :   /**
     254              :    * @brief Construct a new Tensor Dim object
     255              :    *
       256              :    * @param shape shape string describing the dimension
       257              :    * @param fm format NCHW | NHWC
     258              :    * @param d_type DataType QINT4 | QINT8 | QINT16 | BCQ | UINT8 | UINT16 |
     259              :    * UINT32 | FP16 | FP32
     260              :    * @param order data storage order ROW_MAJOR | COL_MAJOR
     261              :    */
     262              :   TensorDim(const std::string &shape, TensorDim::Format fm,
     263              :             TensorDim::DataType d_type = TensorDim::DataType::FP32,
     264              :             TensorDim::StorageOrder order = TensorDim::StorageOrder::ROW_MAJOR);
     265              : 
     266              :   /**
     267              :    * @brief Destroy the Tensor Dim object
     268              :    *
     269              :    */
     270              :   ~TensorDim() = default;
     271              : 
     272              :   /**
       273              :    *  @brief  Move constructor of TensorDim.
       274              :    *  @param[in] rhs TensorDim && to move from
     275              :    */
     276              :   TensorDim(TensorDim &&rhs) noexcept = default;
     277              : 
     278              :   /**
     279              :    * @brief  Move assignment operator.
       280              :    * @param[in] rhs TensorDim to be moved.
     281              :    */
     282              :   TensorDim &operator=(TensorDim &&rhs) noexcept;
     283              : 
     284              :   /**
     285              :    * @brief  get data type size
     286              :    */
     287              :   unsigned int getDataTypeSize() const;
     288              : 
     289              :   /**
     290              :    * @brief Set the Dim Flag to retrieve effective dimension
     291              :    * @note eg) if dimension 4:1:10:1 should be squeezed to 4:10,
     292              :    *       set this to 0b1010, rightmost is width
     293              :    *
     294              :    * @param dim_flag_ dimension bit to calculate, rightmost is width
     295              :    */
     296              :   void setEffDimFlag(const std::bitset<MAXDIM> &dim_flag_);
     297              : 
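Following the @note above, a sketch of squeezing 4:1:10:1 down to an effective 4:10 (rightmost bit is width):

    using ml::train::TensorDim;

    TensorDim dim(4, 1, 10, 1);  // b:c:h:w = 4:1:10:1
    dim.setEffDimFlag(0b1010);   // keep batch and height only -> effective 4:10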
     298              :   /**
     299              :    * @brief Set the dynamic Dim Flag to retrieve dynamic dimension (that can
     300              :    * change during running)
     301              :    * @note eg) if dimension 4:1:10:1 should be squeezed to dynamic to batch,
     302              :    *       set this to 0b1000, rightmost is width
     303              :    * @note when setting dynamic dimension, the calculation must remain
     304              :    * independent of the dynamic dimension. Please check this :)
     305              :    *
     306              :    * @param dim_flag_ dimension bit to calculate, rightmost is width
     307              :    */
     308              :   void setDynDimFlag(const std::bitset<MAXDIM> &dim_flag_);
     309              : 
     310              :   /**
     311              :    * @brief Get the Dim Flag to retrieve effective dimension
     312              :    * @note eg) if dimension 4:1:10:1 should be squeezed to 4:10,
     313              :    *       set this to 0b1010, rightmost is width
     314              :    *
     315              :    * @return dim_flag_ dimension bit to calculate, rightmost is width
     316              :    */
     317              :   const std::bitset<MAXDIM> &getEffDimFlag() const;
     318              : 
     319              :   /**
     320              :    * @brief Get the dynamic Dim Flag to retrieve dynamic dimension (that can
     321              :    * change during running)
     322              :    * @note eg) if dimension 4:1:10:1 should be squeezed to dynamic to batch,
     323              :    *       set this to 0b1000, rightmost is width
     324              :    * @note when setting dynamic dimension, the calculation must remain
     325              :    * independent of the dynamic dimension. Please check this :)
     326              :    *
     327              :    * @return dim_flag_ dimension bit to calculate, rightmost is width
     328              :    */
     329              :   const std::bitset<MAXDIM> &getDynDimFlag() const;
     330              : 
     331              :   /**
       332              :    * @brief  swap the contents of two TensorDim objects
       333              :    * @param[out] lhs TensorDim
       334              :    * @param[in] rhs TensorDim
     335              :    */
     336              :   friend void swap(TensorDim &lhs, TensorDim &rhs) noexcept;
     337              : 
     338              :   /**
     339              :    * @brief get batch (axis 0)
     340              :    *
       341              :    * @return size_t batch size
     342              :    */
     343              :   size_t batch() const;
     344              : 
     345              :   /**
     346              :    * @brief get channel (axis 1)
     347              :    *
     348              :    * @return size_t channel size
     349              :    */
     350              :   size_t channel() const;
     351              : 
     352              :   /**
     353              :    * @brief get height (axis 2)
     354              :    *
     355              :    * @return size_t height size
     356              :    */
     357              :   size_t height() const;
     358              : 
     359              :   /**
     360              :    * @brief get width (axis 3)
     361              :    *
     362              :    * @return size_t width size
     363              :    */
     364              :   size_t width() const;
     365              : 
     366              :   /**
     367              :    * @brief Get the Data Len object
     368              :    *
     369              :    * @return size_t get length of the data
     370              :    */
     371              :   size_t getDataLen() const;
     372              : 
     373              :   /**
     374              :    * @brief Get the Feature Len object
     375              :    *
     376              :    * @return size_t get feature length
     377              :    */
     378              :   size_t getFeatureLen() const;
     379              : 
     380              :   /**
     381              :    * @brief set batch (axis 0)
     382              :    *
     383              :    * @param b batch to set
     384              :    */
     385              :   void batch(size_t b);
     386              : 
     387              :   /**
     388              :    * @brief set channel (axis 1)
     389              :    *
     390              :    * @param c channel to set
     391              :    */
     392              :   void channel(size_t c);
     393              : 
     394              :   /**
     395              :    * @brief set height (axis 2)
     396              :    *
     397              :    * @param h height to set
     398              :    */
     399              :   void height(size_t h);
     400              : 
     401              :   /**
     402              :    * @brief set width (axis 3)
     403              :    *
     404              :    * @param w width to set
     405              :    */
     406              :   void width(size_t w);
     407              : 
     408              :   /**
     409              :    * @brief Get the Dim object
     410              :    *
     411              :    * @return const size_t* array of size[MAXDIM]
     412              :    */
     413              :   const size_t *getDim() const;
     414              : 
     415              :   /**
       416              :    * @brief calculate transposed dimension
     417              :    * @note In this function, batch direction is not considered, so channel is 0
     418              :    * @todo make batch 0
     419              :    *
     420              :    * @param direction  direction to transpose
     421              :    * @return TensorDim calculated dimension
     422              :    */
     423              :   TensorDim transpose(const std::string &direction) const;
     424              : 
     425              :   /**
       426              :    * @brief calculate transposed dimension
     427              :    * @note In this function, batch direction is considered 0
     428              :    *
     429              :    * @param axes axes to be transposed
     430              :    * @return TensorDim calculated dimension
     431              :    */
     432              :   TensorDim transpose(const std::array<size_t, MAXDIM> &axes) const;
     433              : 
     434              :   /**
     435              :    * @brief Get the Tensor dimension for an axis
     436              :    *
     437              :    * @param idx axis to get
     438              :    * @return const size_t dimension of the given axis
     439              :    */
     440              :   const size_t getTensorDim(unsigned int idx) const;
     441              : 
     442              :   /**
     443              :    * @brief Set the Tensor Dim object
     444              :    *
     445              :    * @param idx axis to set
     446              :    * @param value value to set
     447              :    */
     448              :   void setTensorDim(unsigned int idx, size_t value);
     449              : 
     450              :   /**
     451              :    * @brief Set the Tensor Dim object
     452              :    *
       453              :    * @param input_shape input shape string to parse
       454              :    * @param t_type_ tensor type
       455              :    * @return int ML_ERROR_NONE if successful
     456              :    */
     457              :   int setTensorDim(const std::string &input_shape,
     458              :                    TensorType t_type_ = TensorType());
     459              : 
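A sketch of parsing a shape string, assuming the colon-separated batch:channel:height:width form used by nntrainer's input_shape properties:

    using ml::train::TensorDim;

    TensorDim dim;
    int status = dim.setTensorDim("1:3:224:224");  // expected to return ML_ERROR_NONE on success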
     460              :   /**
     461              :    * @brief copy assign a dimension
     462              :    *
     463              :    * @param rhs other side to copy assign
     464              :    * @return TensorDim& tensor dimension
     465              :    */
     466              :   TensorDim &operator=(const TensorDim &rhs);
     467              : 
     468              :   /**
     469              :    * @brief check if tensor dims are equal
     470              :    *
     471              :    * @param rhs other side to compare
     472              :    * @retval true equal
     473              :    * @retval false not equal
     474              :    */
     475              :   bool operator==(const TensorDim &rhs) const;
     476              : 
     477              :   /**
     478              :    * @brief check if tensor dims are not equal
     479              :    *
     480              :    * @param rhs other side to compare
     481              :    * @retval true not equal
     482              :    * @retval false equal
     483              :    */
     484              :   bool operator!=(const TensorDim &rhs) const;
     485              : 
     486              :   /**
     487              :    * @brief check if given tensor dimension is empty
     488              :    *
     489              :    * @retval true empty
     490              :    * @retval false not empty
     491              :    */
     492              :   bool isEmpty() const;
     493              : 
     494              :   /**
       495              :    * @brief get the rank (an axis of size 1 is not counted here)
       496              :    *
       497              :    * @return unsigned int calculated rank
     498              :    */
     499              :   unsigned int rank() const;
     500              : 
     501              :   /**
       502              :    * @brief operator[] to access the dimension at the given index
       503              :    *
       504              :    * @param index index
       505              :    * @return size_t& reference to the dimension at the given index
     506              :    */
     507              :   size_t &operator[](const unsigned int index);
     508              : 
     509              :   /**
       510              :    * @brief operator[] to access the dimension at the given index (const)
       511              :    *
       512              :    * @param index index
       513              :    * @return const size_t& reference to the dimension at the given index
     514              :    */
     515              :   const size_t &operator[](const unsigned int index) const;
     516              : 
     517              :   /**
     518              :    * @brief Calculate standard strides
     519              :    *
     520              :    * @return std::array <unsigned int, MAXDIM>
     521              :    */
     522              :   std::array<size_t, MAXDIM> computeStrides() const;
     523              : 
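A worked example under the assumption that the strides are contiguous row-major element strides: for a 2:3:4:5 dimension they would be {3*4*5, 4*5, 5, 1} = {60, 20, 5, 1}.

    using ml::train::TensorDim;

    TensorDim dim(2, 3, 4, 5);
    std::array<size_t, TensorDim::MAXDIM> strides = dim.computeStrides();
    // expected {60, 20, 5, 1} for row-major NCHW under the assumption above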
     524              :   /**
     525              :    * @brief reverse the dimensions inplace
     526              :    */
     527              :   void reverse();
     528              : 
     529              :   /**
       530              :    * @brief Get the Effective Dimension of the current tensor dimension
     531              :    * @note dynamic dimension is returned as -1
     532              :    *
       533              :    * @param dynamic set this to true if dynamic dimensions should be considered
     534              :    * @return std::vector<int> integer vector
     535              :    */
     536              :   std::vector<int> getEffectiveDimension(bool dynamic = false) const;
     537              : 
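Combining the flags above, a sketch of how a dynamic batch axis would be reported (per the @note, a dynamic dimension comes back as -1 when dynamic is true):

    using ml::train::TensorDim;

    TensorDim dim(4, 1, 10, 1);
    dim.setDynDimFlag(0b1000);  // batch is unspecified at build time
    std::vector<int> eff = dim.getEffectiveDimension(true);  // e.g. {-1, 1, 10, 1}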
     538              :   /**
     539              :    * @brief check if tensor is dynamic
     540              :    *
     541              :    * @retval true any of dyn_dim_flag is set
     542              :    * @retval false none of dyn_dim_flag is set
     543              :    */
     544              :   bool is_dynamic() const;
     545              : 
     546              :   /**
     547              :    * @brief getFormat
     548              :    *
     549              :    */
     550    105394020 :   TensorDim::Format getFormat() const { return t_type.format; };
     551              : 
     552              :   /**
     553              :    * @brief getType
     554              :    *
     555              :    */
     556       485134 :   TensorDim::DataType getDataType() const { return t_type.data_type; };
     557              : 
     558              :   /**
     559              :    * @brief getStorageOrder
     560              :    *
     561              :    */
     562              :   TensorDim::StorageOrder getStorageOrder() const {
     563       171220 :     return t_type.storage_order;
     564              :   };
     565              : 
     566              :   /**
     567              :    * @brief setFormat
     568              :    *
     569              :    */
     570         3911 :   void setFormat(TensorDim::Format fm) { t_type.format = fm; };
     571              : 
     572              :   /**
     573              :    * @brief setDataType
     574              :    *
     575              :    */
     576         8330 :   void setDataType(TensorDim::DataType ty) { t_type.data_type = ty; };
     577              : 
     578              :   /**
       579              :    * @brief setStorageOrder
     580              :    *
     581              :    */
     582              :   void setStorageOrder(TensorDim::StorageOrder storage_order_) {
     583              :     t_type.storage_order = storage_order_;
     584              :   };
     585              : 
     586              :   /**
       587              :    * @brief getTensorType
     588              :    *
     589              :    */
     590       229457 :   TensorType getTensorType() const { return t_type; };
     591              : 
     592              :   /**
     593              :    * @brief setTensorType
     594              :    *
     595              :    */
     596         1344 :   void setTensorType(TensorType tt) { t_type = tt; };
     597              : 
     598              : private:
     599              :   /**
     600              :    * @brief reset length
     601              :    *
     602              :    */
     603              :   void resetLen();
     604              : 
     605              :   TensorType t_type;
     606              : 
     607              :   std::bitset<MAXDIM> eff_dim_flag; /**< dimension bit flag to define effective
     608              :           dimension size */
     609              : 
     610              :   std::bitset<MAXDIM> dyn_dim_flag; /**< dimension bit flag to define
     611              : dynamic dimension size */
     612              : 
     613              :   size_t dim[MAXDIM]; /**< underlying dimension type */
     614              :   size_t len;         /**< number of elements */
     615              :   size_t feature_len; /**< number of feature elements */
     616              : };
     617              : 
     618              : /**
     619              :  * @brief operator<< to print TensorDim
     620              :  *
     621              :  * @param out ostream
     622              :  * @param d dimension to print
     623              :  * @return std::ostream& ostream
     624              :  */
     625              : std::ostream &operator<<(std::ostream &out, TensorDim const &d);
     626              : 
     627              : } /* namespace train */
     628              : } /* namespace ml */
     629              : 
     630              : #endif /* __cplusplus */
     631              : #endif /* __TENSOR_DIM_H__ */
        

Generated by: LCOV version 2.0-1