Line data Source code
1 : // SPDX-License-Identifier: Apache-2.0
2 : /**
3 : * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
4 : *
5 : * @file preprocess_flip_layer.cpp
6 : * @date 20 January 2020
7 : * @see https://github.com/nnstreamer/nntrainer
8 : * @author Parichay Kapoor <pk.kapoor@samsung.com>
9 : * @bug No known bugs except for NYI items
10 : * @brief This is Preprocess Random Flip Layer Class for Neural Network
11 : *
12 : */
13 :
14 : #include <random>
15 :
16 : #include <common_properties.h>
17 : #include <layer_context.h>
18 : #include <nntrainer_error.h>
19 : #include <nntrainer_log.h>
20 : #include <node_exporter.h>
21 : #include <preprocess_flip_layer.h>
22 : #include <util_func.h>
23 :
24 : namespace nntrainer {
25 :
/**
 * @brief Default constructor: registers the FlipDirection property
 *        (unset here; its value arrives later via setProperty()).
 */
PreprocessFlipLayer::PreprocessFlipLayer() :
  Layer(),
  preprocess_flip_props(props::FlipDirection()) {}
29 :
/**
 * @brief Finalize the layer: outputs mirror the input dimensions, and the
 *        RNG plus the uniform [0, 1) distribution used for the per-batch
 *        flip decisions in forwarding() are initialized.
 *
 * NOTE(review): rng.seed(0) hard-codes the seed, so the "random" flip
 * sequence is identical on every run/finalize. Confirm this determinism is
 * intended; a runtime seed (e.g. a helper from util_func.h, which is
 * already included) may be what was meant — TODO confirm.
 */
void PreprocessFlipLayer::finalize(InitLayerContext &context) {
  context.setOutputDimensions(context.getInputDimensions());

  rng.seed(0);
  flip_dist = std::uniform_real_distribution<float>(0.0, 1.0);
}
36 :
37 63 : void PreprocessFlipLayer::setProperty(const std::vector<std::string> &values) {
38 63 : auto remain_props = loadProperties(values, preprocess_flip_props);
39 52 : NNTR_THROW_IF(!remain_props.empty(), std::invalid_argument)
40 2 : << "[PreprocessFilpLayer] Unknown Layer Properties count " +
41 4 : std::to_string(values.size());
42 52 : }
43 :
44 0 : void PreprocessFlipLayer::forwarding(RunLayerContext &context, bool training) {
45 : props::FlipDirectionInfo::Enum flipdirection =
46 0 : std::get<props::FlipDirection>(preprocess_flip_props).get();
47 :
48 0 : if (!training) {
49 0 : for (unsigned int idx = 0; idx < context.getNumInputs(); idx++) {
50 : /** TODO: tell the graph to not include this when not training */
51 0 : context.getOutput(idx) = context.getInput(idx);
52 : }
53 :
54 : return;
55 : }
56 :
57 : using std::swap;
58 : bool fliph, flipw;
59 :
60 0 : for (unsigned int idx = 0; idx < context.getNumInputs(); idx++) {
61 0 : Tensor &hidden_ = context.getOutput(idx);
62 0 : Tensor &input_ = context.getInput(idx);
63 0 : const TensorDim input_dim = input_.getDim();
64 0 : unsigned int width = input_dim.width();
65 0 : unsigned int height = input_dim.height();
66 :
67 0 : for (unsigned int b = 0; b < input_dim.batch(); b++) {
68 : fliph = flipw = false;
69 0 : if (flip_dist(rng) < 0.5 &&
70 : flipdirection != props::FlipDirectionInfo::Enum::vertical)
71 : flipw = true;
72 :
73 0 : if (flip_dist(rng) < 0.5 &&
74 : flipdirection != props::FlipDirectionInfo::Enum::horizontal)
75 : fliph = true;
76 :
77 0 : if (!flipw && !fliph)
78 0 : continue;
79 :
80 0 : if (flipw) {
81 0 : for (unsigned int c = 0; c < input_dim.channel(); c++)
82 0 : for (unsigned int h = 0; h < input_dim.height(); h++)
83 0 : for (unsigned int w = 0; w < input_dim.width() / 2; w++)
84 0 : swap(*input_.getAddress<float>(b, c, h, w),
85 0 : *input_.getAddress<float>(b, c, h, width - w - 1));
86 : }
87 0 : if (fliph) {
88 0 : for (unsigned int c = 0; c < input_dim.channel(); c++)
89 0 : for (unsigned int h = 0; h < input_dim.height() / 2; h++)
90 0 : for (unsigned int w = 0; w < input_dim.width(); w++)
91 0 : swap(*input_.getAddress<float>(b, c, h, w),
92 0 : *input_.getAddress<float>(b, c, height - h - 1, w));
93 : }
94 : }
95 : /** @todo enable inPlace support for this layer */
96 0 : hidden_ = input_;
97 : }
98 : }
99 :
/**
 * @brief Backward pass is undefined for a data-augmentation layer.
 * @throws exception::not_supported always.
 */
void PreprocessFlipLayer::calcDerivative(RunLayerContext &context) {
  throw exception::not_supported(
    "calcDerivative for preprocess layer is not supported");
}
104 :
/**
 * @brief Export this layer's properties (FlipDirection) via the given
 *        exporter using the requested export method.
 */
void PreprocessFlipLayer::exportTo(
  Exporter &exporter, const ml::train::ExportMethods &method) const {
  exporter.saveResult(preprocess_flip_props, method, this);
}
109 :
110 : } /* namespace nntrainer */
|