Line data Source code
1 : // SPDX-License-Identifier: Apache-2.0
2 : /**
3 : * Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
4 : *
5 : * @file node_exporter.cpp
6 : * @date 09 April 2021
7 : * @brief NNTrainer Node exporter
8 : * @see https://github.com/nnstreamer/nntrainer
9 : * @author Jihoon Lee <jhoon.it.lee@samsung.com>
10 : * @author Donghak Park <donghak.park@samsung.com>
11 : * @bug No known bugs except for NYI items
12 : */
13 : #include <node_exporter.h>
14 :
15 : #ifdef ENABLE_TFLITE_INTERPRETER
16 : #include <activation_layer.h>
17 : #include <bitset>
18 : #include <common_properties.h>
19 : #include <fc_layer.h>
20 : #include <map>
21 : #include <node_exporter.h>
22 : #include <tf_schema_generated.h>
23 : #include <tflite_opnode.h>
24 : #endif
25 :
26 : namespace {
27 :
28 : #ifdef ENABLE_TFLITE_INTERPRETER
29 7 : tflite::Padding tflite_padding(const std::string &padding) {
30 : std::map<std::string, tflite::Padding> m = {{"same", tflite::Padding_SAME},
31 21 : {"valid", tflite::Padding_VALID}};
32 7 : return m[padding];
33 0 : }
34 : #endif
35 :
36 : } // namespace
37 :
38 : namespace nntrainer {
39 :
/// number of spatial dimensions (height, width) in Conv2D property arrays
constexpr const unsigned int CONV2D_DIM = 2;
/// number of spatial dimensions (height, width) in Pooling2D property arrays
constexpr const unsigned int POOLING2D_DIM = 2;
42 :
/**
 * @brief Construct a new Exporter object
 *
 * @note the tflite-related members are reset in the body rather than in the
 *       initializer list because they exist only when
 *       ENABLE_TFLITE_INTERPRETER is defined
 */
Exporter::Exporter() : stored_result(nullptr), is_exported(false) {
#ifdef ENABLE_TFLITE_INTERPRETER
  tf_node = nullptr;
  fbb = nullptr;
#endif
}
53 :
54 : #ifdef ENABLE_TFLITE_INTERPRETER
/**
 * @brief Construct a new Exporter object with flatbuffer builder
 *
 * @param fbb non-owning pointer to the builder used to create tflite option
 *        tables (dereferenced by the saveTflResult specializations below);
 *        it must outlive this Exporter
 */
Exporter::Exporter(flatbuffers::FlatBufferBuilder *fbb) :
  fbb(fbb), stored_result(nullptr), is_exported(false) {}
61 : #endif
62 :
/**
 * @brief Destroy the Exporter object
 *
 * defaulted: owned members (stored_result, tf_node) are released by their
 * own destructors; the raw fbb pointer is not owned and is left untouched
 */
Exporter::~Exporter() = default;
68 :
template <>
std::unique_ptr<std::vector<std::pair<std::string, std::string>>>
Exporter::getResult<ml::train::ExportMethods::METHOD_STRINGVECTOR>() {
  /// ownership of the stored key/value list transfers to the caller;
  /// stored_result is left null afterwards, so this is a one-shot accessor
  return std::move(stored_result);
}
74 :
75 : #ifdef ENABLE_TFLITE_INTERPRETER
template <>
std::unique_ptr<TfOpNode>
Exporter::getResult<ml::train::ExportMethods::METHOD_TFLITE>() {
  /// finalize the op node before handing it off; ownership moves to the
  /// caller and tf_node is left null afterwards (one-shot accessor)
  tf_node->finalize();
  return std::move(tf_node);
}
82 :
template <>
void Exporter::saveTflResult(const std::tuple<> &props,
                             const nntrainer::Layer *self) {
  /// a layer with no properties still needs the op node to be allocated
  createIfNull(tf_node);
}
88 :
/**
 * @brief save common LayerNode-level properties; the node object itself is
 *        attached to the tflite op node via setLayerNode rather than copying
 *        individual properties
 */
template <>
void Exporter::saveTflResult(
  const std::tuple<
    props::Name, props::Distribute, props::Trainable,
    std::vector<props::InputConnection>, std::vector<props::InputShape>,
    props::SharedFrom, props::ClipGradByGlobalNorm, props::Packed,
    props::WeightDtype, props::LossScaleForMixed, props::ComputeEngine> &props,
  const LayerNode *self) {
  createIfNull(tf_node);
  tf_node->setLayerNode(*self);
}
100 :
/**
 * @brief LayerImpl-level properties (regularizers, initializers, etc.) have
 *        no tflite representation, so this specialization is intentionally
 *        a no-op
 */
template <>
void Exporter::saveTflResult(
  const std::tuple<props::WeightRegularizer, props::WeightRegularizerConstant,
                   props::WeightInitializer, props::WeightDecay,
                   props::BiasDecay, props::BiasInitializer, props::DisableBias,
                   props::Print> &props,
  const LayerImpl *self) { /// layer impl has nothing to serialize so do nothing
}
109 :
110 : template <>
111 6 : void Exporter::saveTflResult(
112 : const std::tuple<props::Unit, props::LoraRank, props::LoraAlpha> &props,
113 : const FullyConnectedLayer *self) {
114 6 : createIfNull(tf_node);
115 : tf_node->setOpType(tflite::BuiltinOperator_FULLY_CONNECTED);
116 6 : auto options = tflite::CreateFullyConnectedOptions(*fbb).Union();
117 6 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_FullyConnectedOptions,
118 : options);
119 6 : }
120 :
121 : template <>
122 3 : void Exporter::saveTflResult(const std::tuple<props::Activation> &props,
123 : const ActivationLayer *self) {
124 3 : createIfNull(tf_node);
125 :
126 : auto activation = std::get<props::Activation>(props);
127 3 : switch (activation.get()) {
128 2 : case ActivationType::ACT_RELU: {
129 : tf_node->setOpType(tflite::BuiltinOperator_RELU);
130 2 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_NONE,
131 2 : flatbuffers::Offset<void>() /** no options **/);
132 2 : break;
133 : }
134 1 : case ActivationType::ACT_SOFTMAX: {
135 : tf_node->setOpType(tflite::BuiltinOperator_SOFTMAX);
136 1 : auto options = tflite::CreateSoftmaxOptions(*fbb, 1.0).Union();
137 1 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_SoftmaxOptions, options);
138 : break;
139 : }
140 0 : default:
141 0 : throw std::runtime_error{"Unsupported activation type"};
142 : }
143 3 : }
144 :
145 : template <>
146 0 : void Exporter::saveTflResult(
147 : const std::tuple<props::Epsilon, props::MuInitializer, props::VarInitializer,
148 : props::BetaInitializer, props::GammaInitializer,
149 : props::Momentum, props::Axis, props::WeightDecay,
150 : props::BiasDecay> &props,
151 : const BatchNormalizationLayer *self) {
152 0 : createIfNull(tf_node);
153 :
154 0 : auto epsilon = std::get<props::Epsilon>(props).get();
155 : tf_node->AppendAdditionalProps(epsilon);
156 :
157 : tf_node->setOpType(tflite::BuiltinOperator_MUL);
158 : auto options =
159 0 : tflite::CreateMulOptions(*fbb, tflite::ActivationFunctionType_NONE).Union();
160 0 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_MulOptions, options);
161 0 : }
162 :
/**
 * @brief export a conv2d layer as a tflite CONV_2D operator
 *
 * Registers a weight-transform callback that reorders the filter into the
 * tflite {channel_out, height, width, channel_in} layout and reshapes the
 * bias into an effective 1-D tensor of length bias.channel().
 *
 * @throws std::runtime_error when padding is neither "same" nor "valid"
 */
template <>
void Exporter::saveTflResult(
  const std::tuple<props::FilterSize, std::array<props::KernelSize, CONV2D_DIM>,
                   std::array<props::Stride, CONV2D_DIM>, props::Padding2D,
                   std::array<props::Dilation, CONV2D_DIM>> &props,
  const Conv2DLayer *self) {
  createIfNull(tf_node);

  // deferred: runs when the exporter materializes weights, not here
  auto weight_transform = [](std::vector<const Tensor *> &old_weights) {
    std::vector<Tensor> new_weights;

    auto &filter_weight = *old_weights[0];
    // tflite filter has shape format {channel_out, height, width, channel_in}
    Tensor filter(filter_weight.transpose("1:2:0"));
    new_weights.push_back(filter);

    auto &bias_weight = *old_weights[1];
    // bitset 0b0001 marks only the last dim as effective (rank-1 bias)
    TensorDim bias_dim{bias_weight.getTensorType(), std::bitset<4>(0b0001)};
    bias_dim.setTensorDim(
      3 /** index **/,
      bias_weight
        .channel() /** value **/); // effective dimension = {bias->channel()}
    Tensor bias(bias_dim);
    bias.copyData(bias_weight.transpose("1:2:0"));
    bias.setName(bias_weight.getName());

    new_weights.push_back(bias);

    return new_weights;
  };
  tf_node->setWeightTransformFn(weight_transform);

  tf_node->setOpType(tflite::BuiltinOperator_CONV_2D);

  auto &strides = std::get<std::array<props::Stride, CONV2D_DIM>>(props);
  assert(strides.size() == CONV2D_DIM);
  const auto &padding = std::get<props::Padding2D>(props).get();
  // validate eagerly: tflite_padding silently maps unknown strings
  if (padding != "same" && padding != "valid") {
    std::ostringstream ss;
    ss << "Unsupported padding type; \"" << padding
       << "\" is not supported. Use \"same\" or \"valid\".";
    throw std::runtime_error(ss.str());
  }
  auto options = tflite::CreateConv2DOptions(*fbb, tflite_padding(padding),
                                             strides.at(0), strides.at(1))
                   .Union();

  // NOTE(review): padding/strides are also appended as raw props —
  // presumably consumed elsewhere in the tflite export pipeline; confirm
  tf_node->AppendProps(tflite_padding(padding));
  tf_node->AppendProps(strides.at(0));
  tf_node->AppendProps(strides.at(1));

  tf_node->setBuiltinOptions(tflite::BuiltinOptions_Conv2DOptions, options);
}
216 :
/**
 * @brief export an input layer as a tflite TRANSPOSE operator that reorders
 *        activations from NCHW to NHWC
 *
 * A "perm" tensor {0, 2, 3, 1} is appended as a second input to the
 * Transpose op via the input-transform callback.
 */
template <>
void Exporter::saveTflResult(
  const std::tuple<props::Normalization, props::Standardization> &props,
  const InputLayer *self) {
  createIfNull(tf_node);
  // input layer exports to Transpose operator (NCHW -> NHWC)
  tf_node->setOpType(tflite::BuiltinOperator_TRANSPOSE);
  tf_node->setBuiltinOptions(tflite::BuiltinOptions_TransposeOptions,
                             flatbuffers::Offset<void>());

  auto input_transform = [](std::vector<const Tensor *> &inputs) {
    std::vector<Tensor> new_inputs;
    assert(inputs.size() == 1);
    new_inputs.reserve(inputs.size() + 1 /** perm **/);
    new_inputs.push_back(*inputs[0]);
    // create "perm" tensor for Transpose operator
    // @todo : This NCHW format setting is just temporal, it needs to be set by
    // global configuration
    // bitset 0b0001 marks only the last dim as effective (rank-1 tensor)
    TensorDim perm_dim{inputs[0]->getTensorType(), std::bitset<4>(0b0001)};
    perm_dim.setTensorDim(3 /** index **/,
                          4 /** value **/); // effective dimension = {4}
    new_inputs.emplace_back(perm_dim);
    auto &perm = new_inputs.back();
    perm.setName("nntrainer_internal_perm");
    // permutation {0, 2, 3, 1}: axis order N, H, W, C
    perm.setValueInt(0, 0 /** N **/);
    perm.setValueInt(1, 2 /** H **/);
    perm.setValueInt(2, 3 /** W **/);
    perm.setValueInt(3, 1 /** C **/);
    return new_inputs;
  };
  tf_node->setInputTransformFn(input_transform);

  assert(tf_node->getOutputs().size() == 1);
  // const_cast: getOutputs() hands back const tensors, but the Transpose op
  // requires the output buffer to actually be allocated
  auto output_tensor = const_cast<Tensor *>(tf_node->getOutputs()[0]);
  // Transpose op needs buffer
  output_tensor->allocate();
}
254 :
255 : template <>
256 3 : void Exporter::saveTflResult(
257 : const std::tuple<props::PoolingType, std::vector<props::PoolSize>,
258 : std::array<props::Stride, POOLING2D_DIM>, props::Padding2D>
259 : &props,
260 : const Pooling2DLayer *self) {
261 3 : createIfNull(tf_node);
262 :
263 : auto poolingType = std::get<props::PoolingType>(props);
264 3 : auto strides = std::get<std::array<props::Stride, POOLING2D_DIM>>(props);
265 : assert(strides.size() == POOLING2D_DIM);
266 3 : auto poolSize = std::get<std::vector<props::PoolSize>>(props);
267 3 : assert(poolSize.size() == POOLING2D_DIM);
268 3 : const auto &padding = std::get<props::Padding2D>(props).get();
269 3 : assert(padding == "same" || padding == "valid");
270 :
271 3 : switch (poolingType.get()) {
272 3 : case props::PoolingTypeInfo::Enum::average: {
273 : tf_node->setOpType(tflite::BuiltinOperator_AVERAGE_POOL_2D);
274 : auto options =
275 3 : tflite::CreatePool2DOptions(*fbb, tflite_padding(padding), strides.at(0),
276 : strides.at(1), poolSize.at(0), poolSize.at(1))
277 3 : .Union();
278 3 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_Pool2DOptions, options);
279 : break;
280 : }
281 0 : default:
282 0 : throw std::runtime_error{"Unsupported pooling type"};
283 : }
284 6 : }
285 :
286 : template <>
287 1 : void Exporter::saveTflResult(const std::tuple<props::TargetShape> &props,
288 : const ReshapeLayer *self) {
289 1 : createIfNull(tf_node);
290 :
291 : tf_node->setOpType(tflite::BuiltinOperator_RESHAPE);
292 1 : const auto &targetShape = std::get<props::TargetShape>(props).get();
293 : std::vector<int32_t> new_shape_vec = {
294 1 : static_cast<int32_t>(targetShape.batch()),
295 1 : static_cast<int32_t>(targetShape.height()),
296 1 : static_cast<int32_t>(targetShape.width()),
297 1 : static_cast<int32_t>(targetShape.channel())};
298 1 : auto new_shape = fbb->CreateVector(new_shape_vec);
299 1 : auto options = tflite::CreateReshapeOptions(*fbb, new_shape).Union();
300 1 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_ReshapeOptions, options);
301 1 : }
302 :
303 : template <>
304 2 : void Exporter::saveTflResult(const std::tuple<props::TargetShape> &props,
305 : const FlattenLayer *self) {
306 2 : createIfNull(tf_node);
307 :
308 : tf_node->setOpType(tflite::BuiltinOperator_RESHAPE);
309 2 : auto &targetShape = std::get<props::TargetShape>(props).get();
310 :
311 : /// @todo new shape should be 2 rank {batch, channel * height * width}
312 : std::vector<int32_t> new_shape_vec = {
313 2 : static_cast<int32_t>(targetShape.batch()),
314 2 : static_cast<int32_t>(targetShape.height()),
315 2 : static_cast<int32_t>(targetShape.width()),
316 2 : static_cast<int32_t>(targetShape.channel())};
317 2 : auto new_shape = fbb->CreateVector(new_shape_vec);
318 2 : auto options = tflite::CreateReshapeOptions(*fbb, new_shape).Union();
319 2 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_ReshapeOptions, options);
320 2 : }
321 :
322 : template <>
323 0 : void Exporter::saveTflResult(const std::tuple<> &props,
324 : const AdditionLayer *self) {
325 0 : createIfNull(tf_node);
326 :
327 : tf_node->setOpType(tflite::BuiltinOperator_ADD);
328 0 : auto options = tflite::CreateAddOptions(*fbb).Union();
329 0 : tf_node->setBuiltinOptions(tflite::BuiltinOptions_AddOptions, options);
330 0 : }
331 : #endif
332 :
333 : } // namespace nntrainer
|