LCOV - code coverage report
Current view: top level - nntrainer/layers - split_layer.cpp (source / functions) Coverage Total Hit
Test: coverage_filtered.info Lines: 100.0 % 83 83
Test Date: 2025-12-14 20:38:17 Functions: 100.0 % 6 6

            Line data    Source code
       1              : // SPDX-License-Identifier: Apache-2.0
       2              : /**
       3              :  * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
       4              :  *
       5              :  * @file   split_layer.cpp
       6              :  * @date   21 May 2021
       7              :  * @see    https://github.com/nnstreamer/nntrainer
       8              :  * @author Parichay Kapoor <pk.kapoor@samsung.com>
       9              :  * @bug    No known bugs except for NYI items
      10              :  * @brief  This is Split Layer Class for Neural Network
      11              :  *
      12              :  */
      13              : 
      14              : #include <cstring>
      15              : #include <layer_context.h>
      16              : #include <nntrainer_error.h>
      17              : #include <nntrainer_log.h>
      18              : #include <node_exporter.h>
      19              : #include <split_layer.h>
      20              : #include <util_func.h>
      21              : 
      22              : namespace nntrainer {
      23              : 
/* Index used for the layer's single input / single outgoing derivative */
static constexpr size_t SINGLE_INOUT_IDX = 0;
      25              : 
/**
 * @brief Construct a SplitLayer with default state; the split dimension and
 *        split number are filled in later via setProperty()/finalize().
 */
SplitLayer::SplitLayer() :
  Layer(),
  leading_helper_dim(1),
  split_props(props::SplitDimension(), props::SplitNumber()) {}
      30              : 
/**
 * @brief Validate the layer configuration, derive the per-split output
 *        dimensions, and build the reshape helpers used by forwarding() and
 *        calcDerivative().
 *
 * @param context init context providing input dims and receiving output dims
 * @throws std::invalid_argument if there is not exactly one input, if the
 *         split number does not match the number of requested outputs, or if
 *         the split dimension is not divisible by the split number
 */
void SplitLayer::finalize(InitLayerContext &context) {
  NNTR_THROW_IF(context.getNumInputs() != 1, std::invalid_argument)
    << "Error: only a single input is supported with split layer";

  unsigned int split_dimension = std::get<props::SplitDimension>(split_props);

  const TensorDim &in_dim = context.getInputDimensions()[0];

  /* if split number was not specified, default to the full size of the split
   * dimension, i.e. one output per index (split_size becomes 1 below) */
  if (std::get<props::SplitNumber>(split_props).empty()) {
    std::get<props::SplitNumber>(split_props)
      .set(in_dim.getTensorDim(split_dimension));
  }
  unsigned int split_number = std::get<props::SplitNumber>(split_props);

  /**
   * The split is only done along the split_dimension dimension.
   * (Assumes input data is continuous)
   * For example, consider input dimension [b,c,h,w], split_number = n
   * 1. axis = 1, output_dim = [b,c//n,h,w], num_outputs = n
   * 2. axis = 2, output_dim = [b,c,h//n,w], num_outputs = n
   * 3. axis = 3, output_dim = [b,c,h,w//n], num_outputs = n
   */
  NNTR_THROW_IF(split_number != context.getNumRequestedOutputs(),
                std::invalid_argument)
    << "Given split number does not match with number of outputs";

  NNTR_THROW_IF(in_dim.getTensorDim(split_dimension) % split_number,
                std::invalid_argument)
    << "Split dimension cannot be split into given number of split_number";

  const unsigned int split_size =
    in_dim.getTensorDim(split_dimension) / split_number;

  /* each output keeps the input shape except the split dimension shrinks */
  TensorDim d = in_dim;
  d.setTensorDim(split_dimension, split_size);

  std::vector<TensorDim> output_dim(context.getNumRequestedOutputs());
  for (auto &out_dim : output_dim) {
    out_dim = d;
  }
  context.setOutputDimensions(output_dim);

  /**
   * Setup input_reshape_helper to which input will be reshaped in forwarding
   * to facilitate easier processing.
   *
   * The helper shape consolidates all the dimensions before the split_dimension
   * together and all the dimensions after the split_dimension to facilitate
   * easier splitting of the data.
   */
  leading_helper_dim = 1;
  input_reshape_helper.channel(1);
  input_reshape_helper.height(1);
  input_reshape_helper.width(1);
  /* fold dims between batch (exclusive) and split_dimension (exclusive) into
   * leading_helper_dim; presumably setBatch() combines this with the actual
   * batch size — setBatch() is not visible in this file, confirm there */
  for (unsigned int idx = 1; idx < split_dimension; ++idx) {
    leading_helper_dim *= in_dim.getTensorDim(idx);
  }

  /* the helper height carries the whole split dimension of the input */
  input_reshape_helper.height(in_dim.getTensorDim(split_dimension));

  /* collapse all dims after split_dimension into the helper width */
  for (unsigned int idx = split_dimension + 1;
       idx < ml::train::TensorDim::MAXDIM; ++idx) {
    input_reshape_helper.width(input_reshape_helper.width() *
                               in_dim.getTensorDim(idx));
  }

  /**
   * Setup output_reshape_helper to which each output will be reshaped in
   * forwarding to facilitate easier processing. It is identical to the input
   * helper except its height holds only a single split's slice (split_size).
   */
  output_reshape_helper = input_reshape_helper;
  output_reshape_helper.height(split_size);

  setBatch(in_dim.batch());
}
     106              : 
     107          249 : void SplitLayer::forwarding(RunLayerContext &context, bool training) {
     108          249 :   unsigned int split_number = std::get<props::SplitNumber>(split_props);
     109              : 
     110          249 :   Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
     111              : 
     112          249 :   const TensorDim in_dim = input_.getDim();
     113          249 :   input_.reshape(input_reshape_helper);
     114              : 
     115          797 :   for (unsigned int idx = 0; idx < split_number; idx++) {
     116          548 :     Tensor &output_ = context.getOutput(idx);
     117          548 :     const TensorDim out_dim = output_.getDim();
     118          548 :     output_.reshape(output_reshape_helper);
     119              : 
     120         9894 :     for (unsigned int batch = 0; batch < input_.batch(); batch++) {
     121              :       const Tensor source_tensor = Tensor::Map(
     122         9346 :         input_.getAddress(batch, 0, idx * output_reshape_helper.height(), 0),
     123         9346 :         output_reshape_helper.height() * input_reshape_helper.width() *
     124              :           sizeof(float),
     125        18692 :         {1, 1, output_reshape_helper.height(), input_reshape_helper.width()});
     126              :       Tensor dest_tensor = Tensor::Map(
     127              :         output_.getAddress(batch, 0, 0, 0),
     128         9346 :         output_reshape_helper.height() * output_reshape_helper.width() *
     129              :           sizeof(float),
     130        18692 :         {1, 1, output_reshape_helper.height(), output_reshape_helper.width()});
     131         9346 :       dest_tensor.copy(source_tensor);
     132         9346 :     }
     133              : 
     134          548 :     output_.reshape(out_dim);
     135              :   }
     136              : 
     137          249 :   input_.reshape(in_dim);
     138          249 : }
     139              : 
     140           25 : void SplitLayer::calcDerivative(RunLayerContext &context) {
     141           25 :   unsigned int split_number = std::get<props::SplitNumber>(split_props);
     142              : 
     143           25 :   Tensor &input_ = context.getOutgoingDerivative(SINGLE_INOUT_IDX);
     144              : 
     145           25 :   const TensorDim in_dim = input_.getDim();
     146           25 :   input_.reshape(input_reshape_helper);
     147              : 
     148          101 :   for (unsigned int idx = 0; idx < split_number; idx++) {
     149           76 :     Tensor output_ = context.getIncomingDerivative(idx);
     150           76 :     const TensorDim out_dim = output_.getDim();
     151           76 :     output_.reshape(output_reshape_helper);
     152              : 
     153          759 :     for (unsigned int batch = 0; batch < input_.batch(); batch++) {
     154              :       Tensor dest_tensor = Tensor::Map(
     155          683 :         input_.getAddress(batch, 0, idx * output_reshape_helper.height(), 0),
     156          683 :         output_reshape_helper.height() * input_reshape_helper.width() *
     157              :           sizeof(float),
     158         1366 :         {1, 1, output_reshape_helper.height(), input_reshape_helper.width()});
     159              :       const Tensor source_tensor = Tensor::Map(
     160              :         output_.getAddress(batch, 0, 0, 0),
     161          683 :         output_reshape_helper.height() * output_reshape_helper.width() *
     162              :           sizeof(float),
     163         1366 :         {1, 1, output_reshape_helper.height(), output_reshape_helper.width()});
     164          683 :       dest_tensor.copy(source_tensor);
     165          683 :     }
     166              : 
     167           76 :     output_.reshape(out_dim);
     168           76 :   }
     169              : 
     170           25 :   input_.reshape(in_dim);
     171           25 : }
     172              : 
/**
 * @brief Export this layer's split properties through the given exporter.
 *
 * @param exporter exporter that collects the serialized properties
 * @param method   export method/format to serialize for
 */
void SplitLayer::exportTo(Exporter &exporter,
                          const ml::train::ExportMethods &method) const {
  exporter.saveResult(split_props, method, this);
}
     177              : 
     178          270 : void SplitLayer::setProperty(const std::vector<std::string> &values) {
     179          270 :   auto remain_props = loadProperties(values, split_props);
     180          269 :   NNTR_THROW_IF(!remain_props.empty(), std::invalid_argument)
     181            2 :     << "[SplitLayer] Unknown Layer Properties count " +
     182            4 :          std::to_string(values.size());
     183          269 : }
     184              : 
     185              : } /* namespace nntrainer */
        

Generated by: LCOV version 2.0-1