Line data Source code
1 : // SPDX-License-Identifier: Apache-2.0
2 : /**
3 : * Copyright (C) 2024 SeungBaek Hong <sb92.hong@samsung.com>
4 : *
5 : * @file operation_layer.h
6 : * @date 4 Oct 2024
7 : * @see https://github.com/nnstreamer/nntrainer
8 : * @author SeungBaek Hong <sb92.hong@samsung.com>
9 : * @bug No known bugs except for NYI items
 * @brief This is a common class for operation layers
11 : *
12 : */
13 : #ifndef __LAYER_OPERATION_H__
14 : #define __LAYER_OPERATION_H__
15 : #ifdef __cplusplus
16 :
#include <cstddef>
#include <stdexcept>

#include <layer_context.h>
#include <layer_devel.h>
19 :
20 : namespace nntrainer {
21 :
22 : /**
23 : * @brief Base class for Unary Tensor Operation Layer
24 : *
25 : */
26 : class UnaryOperationLayer : public Layer {
27 : public:
28 : /**
29 : * @brief forwarding operation for unary input
30 : *
31 : */
32 : virtual void forwarding_operation(const Tensor &input, Tensor &hidden) = 0;
33 :
34 : /**
35 : * @brief copydoc Layer::forwarding(RunLayerContext &context, bool training)
36 : *
37 : */
38 36 : void forwarding(RunLayerContext &context, bool training) override {
39 36 : Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
40 :
41 36 : const Tensor input = context.getInput(0);
42 36 : forwarding_operation(input, hidden_);
43 36 : }
44 :
45 : /**
46 : * @copydoc Layer::incremental_forwarding(RunLayerContext &context, unsigned
47 : * int from, unsigned int to, bool training)
48 : *
49 : */
50 0 : void incremental_forwarding(RunLayerContext &context, unsigned int from,
51 : unsigned int to, bool training) override {
52 0 : if (from) {
53 0 : NNTR_THROW_IF(to - from != 1, std::invalid_argument)
54 : << "incremental step size is not 1";
55 : from = 0;
56 : to = 1;
57 : }
58 :
59 0 : Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
60 0 : TensorDim hidden_dim = hidden_.getDim();
61 0 : TensorDim hidden_step_dim = hidden_dim;
62 :
63 0 : hidden_step_dim.batch(1);
64 0 : hidden_step_dim.height(to - from);
65 :
66 0 : const Tensor &input = context.getInput(0);
67 0 : TensorDim input_dim = input.getDim();
68 0 : TensorDim input_step_dim = input_dim;
69 0 : input_step_dim.batch(1);
70 0 : input_step_dim.height(to - from);
71 :
72 0 : for (unsigned int b = 0; b < hidden_.batch(); ++b) {
73 : Tensor hidden_step = hidden_.getSharedDataTensor(
74 0 : hidden_step_dim, b * hidden_dim.getFeatureLen(), true);
75 :
76 : Tensor input_step = input.getSharedDataTensor(
77 0 : input_step_dim, b * input_dim.getFeatureLen(), true);
78 :
79 0 : forwarding_operation(input_step, hidden_step);
80 0 : }
81 0 : }
82 :
83 : static constexpr size_t SINGLE_INOUT_IDX = 0;
84 : };
85 :
86 : /**
87 : * @brief Base class for Binary Tensor Operation Layer
88 : *
89 : */
90 : class BinaryOperationLayer : public Layer {
91 : public:
92 : /**
93 : * @brief forwarding operation for binary inputs
94 : *
95 : */
96 : virtual void forwarding_operation(const Tensor &input0, const Tensor &input1,
97 : Tensor &hidden) = 0;
98 :
99 : /**
100 : * @brief copydoc Layer::forwarding(RunLayerContext &context, bool training)
101 : *
102 : */
103 30 : void forwarding(RunLayerContext &context, bool training) override {
104 30 : Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
105 :
106 30 : const Tensor &input0 = context.getInput(0);
107 30 : const Tensor &input1 = context.getInput(1);
108 30 : forwarding_operation(input0, input1, hidden_);
109 30 : }
110 :
111 : /**
112 : * @copydoc Layer::incremental_forwarding(RunLayerContext &context, unsigned
113 : * int from, unsigned int to, bool training)
114 : *
115 : */
116 0 : void incremental_forwarding(RunLayerContext &context, unsigned int from,
117 : unsigned int to, bool training) override {
118 0 : if (from) {
119 0 : NNTR_THROW_IF(to - from != 1, std::invalid_argument)
120 : << "incremental step size is not 1";
121 : from = 0;
122 : to = 1;
123 : }
124 :
125 0 : Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
126 0 : TensorDim hidden_dim = hidden_.getDim();
127 0 : TensorDim hidden_step_dim = hidden_dim;
128 :
129 0 : hidden_step_dim.batch(1);
130 0 : hidden_step_dim.height(to - from);
131 :
132 0 : const Tensor &input0 = context.getInput(0);
133 0 : const Tensor &input1 = context.getInput(1);
134 :
135 0 : TensorDim input0_dim = input0.getDim();
136 0 : TensorDim input1_dim = input1.getDim();
137 0 : if (input0_dim != input1_dim) {
138 : throw std::invalid_argument(
139 : "If the two input dimensions are different, the incremental "
140 0 : "forwarding implementation must be overridden.");
141 : }
142 :
143 0 : TensorDim input_step_dim = input0_dim;
144 0 : input_step_dim.batch(1);
145 0 : input_step_dim.height(to - from);
146 :
147 0 : for (unsigned int b = 0; b < hidden_.batch(); ++b) {
148 : Tensor hidden_step = hidden_.getSharedDataTensor(
149 0 : hidden_step_dim, b * hidden_dim.getFeatureLen(), true);
150 :
151 : Tensor input0_step = input0.getSharedDataTensor(
152 0 : input_step_dim, b * input0_dim.getFeatureLen(), true);
153 :
154 : Tensor input1_step = input1.getSharedDataTensor(
155 0 : input_step_dim, b * input1_dim.getFeatureLen(), true);
156 :
157 0 : forwarding_operation(input0_step, input1_step, hidden_step);
158 0 : }
159 0 : }
160 :
161 : static constexpr size_t SINGLE_INOUT_IDX = 0;
162 : };
163 : } // namespace nntrainer
164 :
165 : #endif /* __cplusplus */
166 : #endif /* __LAYER_OPERATION_H__ */
|