LCOV - code coverage report

Current view: top level - nntrainer/layers - weight_layer.cpp (source / functions)
Test:         coverage_filtered.info
Test Date:    2025-12-14 20:38:17

              Coverage    Total    Hit
Lines:        44.8 %      29       13
Functions:    37.5 %      8        3

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2024 SeungBaek Hong <sb92.hong@samsung.com>
       4              :  *
       5              :  * @file   weight_layer.cpp
       6              :  * @date   2 August 2024
       7              :  * @brief  This is a layer that simply stores a weight tensor without any
       8              :  * operation.
       9              :  * @see    https://github.com/nnstreamer/nntrainer
      10              :  * @author SeungBaek Hong <sb92.hong@samsung.com>
      11              :  * @bug    No known bugs except for NYI items
      12              :  *
      13              :  */
      14              : 
      15              : #include <common_properties.h>
      16              : #include <layer_context.h>
      17              : #include <lazy_tensor.h>
      18              : #include <nntrainer_error.h>
      19              : #include <nntrainer_log.h>
      20              : #include <node_exporter.h>
      21              : #include <util_func.h>
      22              : #include <weight_layer.h>
      23              : 
      24              : #include <iostream>
      25              : 
      26              : namespace nntrainer {
      27              : 
      28              : static constexpr size_t SINGLE_INOUT_IDX = 0;
      29          111 : WeightLayer::WeightLayer() : LayerImpl(), weight_props({}, {}, {}) {}
      30              : 
      31            8 : void WeightLayer::finalize(InitLayerContext &context) {
      32              :   auto &weight_regularizer =
      33              :     std::get<props::WeightRegularizer>(*layer_impl_props);
      34              :   auto &weight_regularizer_constant =
      35              :     std::get<props::WeightRegularizerConstant>(*layer_impl_props);
      36              :   auto &weight_initializer =
      37              :     std::get<props::WeightInitializer>(*layer_impl_props);
      38              :   auto &weight_decay = std::get<props::WeightDecay>(*layer_impl_props);
      39              : 
      40            8 :   const auto &weight_dim = std::get<props::TensorDimension>(weight_props).get();
      41              :   const auto &weight_dtype = std::get<props::TensorDataType>(weight_props);
      42              :   const auto &weight_name = std::get<props::WeightName>(weight_props);
      43              : 
      44            8 :   std::vector<TensorDim> output_dims(1);
      45              : 
      46            8 :   output_dims[SINGLE_INOUT_IDX] = weight_dim;
      47            8 :   output_dims[SINGLE_INOUT_IDX].setTensorType(
      48              :     {context.getFormat(), weight_dtype});
      49              : 
      50            8 :   context.setOutputDimensions(output_dims);
      51              : 
      52            8 :   weight_idx = context.requestWeight(
      53              :     weight_dim, weight_initializer, weight_regularizer,
      54              :     weight_regularizer_constant, weight_decay, weight_name, true);
      55            8 : }
      56              : 
      57            0 : void WeightLayer::exportTo(Exporter &exporter,
      58              :                            const ml::train::ExportMethods &method) const {
      59            0 :   LayerImpl::exportTo(exporter, method);
      60            0 :   exporter.saveResult(weight_props, method, this);
      61            0 : }
      62              : 
      63           68 : void WeightLayer::setProperty(const std::vector<std::string> &values) {
      64           68 :   auto remain_props = loadProperties(values, weight_props);
      65           66 :   LayerImpl::setProperty(remain_props);
      66           66 : }
      67              : 
      68            0 : void WeightLayer::forwarding(RunLayerContext &context, bool training) {
      69            0 :   Tensor &weight = context.getWeight(weight_idx);
      70            0 :   Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
      71            0 :   output.copy(weight);
      72            0 : }
      73              : 
      74            0 : void WeightLayer::calcDerivative(RunLayerContext &context) {
      75              :   throw exception::not_supported(
      76            0 :     "calcDerivative for weight layer is not supported");
      77              : }
      78              : 
      79            0 : void WeightLayer::calcGradient(RunLayerContext &context) {
      80            0 :   Tensor &djdw = context.getWeightGrad(weight_idx);
      81            0 :   const Tensor &derivative_ = context.getIncomingDerivative(SINGLE_INOUT_IDX);
      82            0 :   djdw.copy(derivative_);
      83            0 : }
      84              : 
      85              : } /* namespace nntrainer */
        

Generated by: LCOV version 2.0-1
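
Note: the functions reported above with zero hit counts (exportTo, forwarding, calcDerivative, calcGradient) are never exercised by the tests captured in coverage_filtered.info. As a rough, standalone sketch of the pass-through behavior those uncovered paths implement (illustrative only, not nntrainer code; std::vector stands in for nntrainer::Tensor):

    // forwarding() copies the stored weight into the layer output and
    // calcGradient() copies the incoming derivative into the weight gradient,
    // so the layer behaves as a trainable constant tensor.
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<float> weight = {1.0f, 2.0f, 3.0f}; // stored weight W

      // forwarding(): output.copy(weight) -> output equals W.
      std::vector<float> output = weight;
      assert(output == weight);

      // calcGradient(): djdw.copy(derivative_) -> dL/dW equals dL/d(output).
      std::vector<float> incoming_derivative = {0.1f, 0.2f, 0.3f};
      std::vector<float> djdw = incoming_derivative;
      assert(djdw == incoming_derivative);

      return 0;
    }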