LCOV - code coverage report
Current view: top level - nntrainer/layers - attention_layer.h (source / functions)
Test:         coverage_filtered.info
Test Date:    2025-12-14 20:38:17

              Coverage   Total   Hit
Lines:        50.0 %     4       2
Functions:    66.7 %     3       2
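Note: the four instrumented lines are the inline function bodies at source
lines 78 (supportBackwarding, 2 hits), 84-85 (exportTo, 0 hits) and 95
(getType, 27 hits); exportTo is the one function the filtered test run
never executed.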

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
       4              :  *
       5              :  * @file   attention_layer.h
       6              :  * @date   1 October 2021
       7              :  * @see    https://github.com/nnstreamer/nntrainer
       8              :  * @author Parichay Kapoor <pk.kapoor@samsung.com>
       9              :  * @bug    No known bugs except for NYI items
      10              :  * @brief  This is Attention Layer Class for Neural Network
      11              :  *
      12              :  */
      13              : 
      14              : #ifndef __ATTENTION_LAYER_H__
      15              : #define __ATTENTION_LAYER_H__
      16              : #ifdef __cplusplus
      17              : 
      18              : #include <acti_func.h>
      19              : #include <common_properties.h>
      20              : #include <layer_devel.h>
      21              : #include <limits>
      22              : 
      23              : namespace nntrainer {
      24              : 
      25              : /**
       26              :  * @class   AttentionLayer
      27              :  * @brief   Attention Layer
      28              :  */
      29              : class AttentionLayer : public virtual Layer {
      30              : public:
      31              :   /**
      32              :    * @brief     Constructor of Attention Layer
      33              :    */
      34              :   AttentionLayer();
      35              : 
      36              :   /**
      37              :    * @brief     Destructor of Attention Layer
      38              :    */
      39              :   ~AttentionLayer();
      40              : 
      41              :   /**
      42              :    *  @brief  Move constructor of AttentionLayer.
       43              :    *  @param[in] rhs AttentionLayer to be moved.
      44              :    */
      45              :   AttentionLayer(AttentionLayer &&rhs) noexcept = default;
      46              : 
      47              :   /**
      48              :    * @brief  Move assignment operator.
       49              :    * @param[in] rhs AttentionLayer to be moved.
      50              :    */
       51              :   AttentionLayer &operator=(AttentionLayer &&rhs) noexcept = default;
      52              : 
      53              :   /**
      54              :    * @copydoc Layer::finalize(InitLayerContext &context)
      55              :    */
      56              :   void finalize(InitLayerContext &context) override;
      57              : 
      58              :   /**
      59              :    * @copydoc Layer::forwarding(RunLayerContext &context, bool training)
      60              :    */
      61              :   void forwarding(RunLayerContext &context, bool training) override;
      62              : 
      63              :   /**
      64              :    * @copydoc Layer::incremental_forwarding(RunLayerContext &context, unsigned
      65              :    * int from, unsigned int to, bool training)
      66              :    */
      67              :   void incremental_forwarding(RunLayerContext &context, unsigned int from,
      68              :                               unsigned int to, bool training) override;
      69              : 
      70              :   /**
      71              :    * @copydoc Layer::calcDerivative(RunLayerContext &context)
      72              :    */
      73              :   void calcDerivative(RunLayerContext &context) override;
      74              : 
      75              :   /**
       76              :    * @copydoc Layer::supportBackwarding() const
      77              :    */
       78            2 :   bool supportBackwarding() const override { return true; }
      79              : 
      80              :   /**
      81              :    * @copydoc Layer::exportTo(Exporter &exporter, ml::train::ExportMethods
      82              :    * method)
      83              :    */
      84            0 :   void exportTo(Exporter &exporter,
      85            0 :                 const ml::train::ExportMethods &method) const override {}
      86              : 
      87              :   /**
      88              :    * @copydoc Layer::setProperty(const std::vector<std::string> &values)
      89              :    */
      90              :   void setProperty(const std::vector<std::string> &values) override;
      91              : 
      92              :   /**
      93              :    * @copydoc Layer::getType()
      94              :    */
       95           27 :   const std::string getType() const override { return AttentionLayer::type; }
      96              : 
      97              :   /**
      98              :    * @copydoc Layer::setBatch(RunLayerContext &context, unsigned int batch)
      99              :    */
     100              :   void setBatch(RunLayerContext &context, unsigned int batch) override;
     101              : 
     102              :   static constexpr const char *type = "attention";
     103              : 
     104              : protected:
     105              :   /**
     106              :    * @brief     Finalize the attention layer with the given context
     107              :    * @param[in] context InitLayerContext
     108              :    *
     109              :    * @note This function provides the basic finalize details which can be shared
     110              :    * with derived classes as well
     111              :    */
     112              :   void finalizeCommon(InitLayerContext &context);
     113              : 
      114              :   std::tuple<props::ScaledDotProduct, props::CausalMask> attention_props; /**< attention layer properties */
     115              : 
     116              : private:
      117              :   ActiFunc sm;                        /**< softmax activation operation */
     118              :   std::array<unsigned int, 4> wt_idx; /**< indices of the weights and tensors */
     119              : };
     120              : 
     121              : } // namespace nntrainer
     122              : 
     123              : #endif /* __cplusplus */
     124              : #endif /* __ATTENTION_LAYER_H__ */
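
As a rough illustration of what this interface computes, below is a minimal,
self-contained C++ sketch of scaled dot-product attention with an optional
causal mask. The scaled and causal flags loosely mirror the
props::ScaledDotProduct and props::CausalMask properties declared above, and
the softmax step plays the role of the ActiFunc sm member. The Mat alias and
the attention() function are invented for this sketch; nntrainer's actual
forwarding() operates on its own Tensor and RunLayerContext types, handles
batching, and caches intermediates for calcDerivative(), so treat this as a
conceptual outline, not the library's implementation.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <vector>

using Mat = std::vector<std::vector<float>>; // row-major [rows][cols], hypothetical helper type

// out = softmax(Q * K^T [* 1/sqrt(d)] [+ causal mask]) * V
Mat attention(const Mat &Q, const Mat &K, const Mat &V, bool scaled, bool causal) {
  const std::size_t n = Q.size();    // query length
  const std::size_t m = K.size();    // key/value length
  const std::size_t d = Q[0].size(); // per-token feature dimension
  const float scale = scaled ? 1.0f / std::sqrt(static_cast<float>(d)) : 1.0f;

  Mat out(n, std::vector<float>(V[0].size(), 0.0f));
  for (std::size_t i = 0; i < n; ++i) {
    // Scaled dot-product scores; masked positions stay at -inf.
    std::vector<float> s(m, -std::numeric_limits<float>::infinity());
    for (std::size_t j = 0; j < m; ++j) {
      if (causal && j > i)
        continue; // causal mask: query i may not attend to a future key j
      float dot = 0.0f;
      for (std::size_t k = 0; k < d; ++k)
        dot += Q[i][k] * K[j][k];
      s[j] = dot * scale;
    }
    // Numerically stable softmax over the scores (the role of ActiFunc sm);
    // exp(-inf - max) evaluates to 0, so masked keys get zero weight.
    const float mx = *std::max_element(s.begin(), s.end());
    float sum = 0.0f;
    for (float &v : s) {
      v = std::exp(v - mx);
      sum += v;
    }
    // Attention output: probability-weighted sum of the value rows.
    for (std::size_t j = 0; j < m; ++j)
      for (std::size_t k = 0; k < V[0].size(); ++k)
        out[i][k] += (s[j] / sum) * V[j][k];
  }
  return out;
}

For example, attention(Q, K, V, /*scaled=*/true, /*causal=*/true) over a
3-token sequence produces a lower-triangular attention pattern, which is the
behavior the props::CausalMask property toggles.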
        

Generated by: LCOV version 2.0-1