Line data Source code
1 : // SPDX-License-Identifier: Apache-2.0
2 : /**
3 : * Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
4 : */
5 : /**
6 : * @file tensor_dim.cpp
7 : * @date 22 May 2020
8 : * @brief This is the Tensor Dimension class
9 : * @see https://github.com/nnstreamer/nntrainer
10 : * @author Jijoong Moon <jijoong.moon@samsung.com>
11 : * @bug No known bugs except for NYI items
12 : *
13 : */
14 :
15 : #include <cstring>
16 : #include <regex>
17 : #include <sstream>
18 : #include <stdio.h>
19 :
20 : #include <nntrainer_error.h>
21 : #include <nntrainer_log.h>
22 : #include <tensor_dim.h>
23 : #include <util_func.h>
24 :
25 : namespace ml {
26 : namespace train {
27 :
28 658701 : TensorDim::TensorDim(TensorDim::Format fm, TensorDim::DataType d_type,
29 : const std::bitset<MAXDIM> &eff_dim_flag_,
30 658701 : const std::bitset<MAXDIM> &dyn_dim_flag_) :
31 658701 : TensorDim(TensorDim::TensorType(fm, d_type), eff_dim_flag_, dyn_dim_flag_) {}
32 :
33 1478027 : TensorDim::TensorDim(TensorType t_type_,
34 : const std::bitset<MAXDIM> &eff_dim_flag_,
35 1478027 : const std::bitset<MAXDIM> &dyn_dim_flag_) :
36 1478027 : t_type(t_type_), eff_dim_flag(eff_dim_flag_), dyn_dim_flag(dyn_dim_flag_) {
37 7390135 : for (size_t i = 0; i < MAXDIM; ++i) {
38 5912108 : dim[i] = 0;
39 : }
40 1478027 : len = 0;
41 1478027 : feature_len = 0;
42 1478027 : }
43 :
44 68285 : TensorDim::TensorDim(std::initializer_list<size_t> dims, TensorType t_type_) :
45 68285 : TensorDim(t_type_) {
46 68285 : int shift_size = MAXDIM - dims.size();
47 :
48 68285 : if (shift_size < 0) {
49 2 : throw std::invalid_argument("[TensorDim] max dimension is 4");
50 : }
51 :
52 : unsigned int cnt = 0;
53 :
54 248406 : for (auto &i : dims) {
55 180126 : setTensorDim(shift_size + cnt, i);
56 180123 : cnt += 1;
57 : }
58 68280 : }
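
A minimal usage sketch (not part of the listed source; assumes <tensor_dim.h> is included, the statements sit inside a function, and the defaulted TensorType argument declared in the header): shorter initializer lists are right-aligned into the fixed 4-D layout, so the missing leading dimensions stay 1.

  ml::train::TensorDim d({3, 5});
  // stored as 1:1:3:5 -> batch() == 1, channel() == 1, height() == 3, width() == 5
  // a list with more than four entries throws std::invalid_argument
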
59 :
60 1 : TensorDim::TensorDim(size_t d3, TensorType t_type_,
61 : const std::bitset<MAXDIM> &eff_dim_flag_,
62 1 : const std::bitset<MAXDIM> &dyn_dim_flag_) :
63 1 : TensorDim(t_type_, eff_dim_flag_, dyn_dim_flag_) {
64 :
65 1 : setTensorDim(3, d3);
66 1 : feature_len = d3;
67 1 : len = feature_len;
68 1 : }
69 :
70 1 : TensorDim::TensorDim(size_t d2, size_t d3, TensorType t_type_,
71 : const std::bitset<MAXDIM> &eff_dim_flag_,
72 1 : const std::bitset<MAXDIM> &dyn_dim_flag_) :
73 1 : TensorDim(t_type_, eff_dim_flag_, dyn_dim_flag_) {
74 :
75 1 : setTensorDim(2, d2);
76 1 : setTensorDim(3, d3);
77 1 : feature_len = d2 * d3;
78 1 : len = feature_len;
79 1 : }
80 :
81 1 : TensorDim::TensorDim(size_t d1, size_t d2, size_t d3, TensorType t_type_,
82 : const std::bitset<MAXDIM> &eff_dim_flag_,
83 1 : const std::bitset<MAXDIM> &dyn_dim_flag_) :
84 1 : TensorDim(t_type_, eff_dim_flag_, dyn_dim_flag_) {
85 :
86 1 : setTensorDim(1, d1);
87 1 : setTensorDim(2, d2);
88 1 : setTensorDim(3, d3);
89 1 : feature_len = d1 * d2 * d3;
90 1 : len = feature_len;
91 1 : }
92 :
93 0 : TensorDim::TensorDim(const std::array<size_t, 3> &shapes, TensorType t_type_) :
94 0 : TensorDim({shapes[0], shapes[1], shapes[2]}, t_type_) {}
95 :
96 189952 : TensorDim::TensorDim(size_t d0, size_t d1, size_t d2, size_t d3,
97 : TensorType t_type_,
98 : const std::bitset<MAXDIM> &eff_dim_flag_,
99 189952 : const std::bitset<MAXDIM> &dyn_dim_flag_) :
100 189952 : TensorDim(t_type_, eff_dim_flag_, dyn_dim_flag_) {
101 :
102 189952 : setTensorDim(0, d0);
103 189952 : setTensorDim(1, d1);
104 189952 : setTensorDim(2, d2);
105 189952 : setTensorDim(3, d3);
106 189952 : feature_len = d1 * d2 * d3;
107 189952 : len = d0 * feature_len;
108 189952 : }
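
For reference, a sketch of how the cached lengths relate to the four dimensions (illustrative values, same assumptions as the earlier sketch):

  ml::train::TensorDim d(2, 3, 4, 5);   // batch:channel:height:width
  // d.getFeatureLen() == 3 * 4 * 5 == 60
  // d.getDataLen()    == 2 * 60    == 120
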
109 :
110 2184 : TensorDim::TensorDim(size_t d0, size_t d1, size_t d2, size_t d3,
111 : TensorDim::Format fm, TensorDim::DataType d_type,
112 : const std::bitset<MAXDIM> &eff_dim_flag_,
113 2184 : const std::bitset<MAXDIM> &dyn_dim_flag_) :
114 : TensorDim(d0, d1, d2, d3, TensorType(fm, d_type), eff_dim_flag_,
115 2184 : dyn_dim_flag_) {}
116 :
117 278 : TensorDim::TensorDim(const std::string &shape, TensorType t_type_) :
118 278 : TensorDim() {
119 278 : if (setTensorDim(shape, t_type_) != ML_ERROR_NONE) {
120 0 : throw std::invalid_argument("[TensorDim] Setting TensorDim failed");
121 : }
122 278 : }
123 :
124 0 : TensorDim::TensorDim(const std::string &shape, TensorDim::Format fm,
125 : TensorDim::DataType d_type,
126 0 : TensorDim::StorageOrder order) :
127 0 : TensorDim() {
128 0 : if (setTensorDim(shape, TensorType(fm, d_type, order)) != ML_ERROR_NONE) {
129 0 : throw std::invalid_argument("[TensorDim] Setting TensorDim failed");
130 : }
131 0 : }
132 :
133 1282990 : TensorDim &TensorDim::operator=(const TensorDim &rhs) {
134 : using std::swap;
135 :
136 1282990 : TensorDim tmp(rhs);
137 1282990 : swap(*this, tmp);
138 1282990 : return *this;
139 : }
140 :
141 1036 : TensorDim &TensorDim::operator=(TensorDim &&rhs) noexcept {
142 : using std::swap;
143 :
144 1036 : swap(*this, rhs);
145 1036 : return *this;
146 : }
147 :
148 63750 : unsigned int TensorDim::getDataTypeSize() const {
149 63750 : switch (t_type.data_type) {
150 : case TensorDim::DataType::FP16:
151 : #ifdef ENABLE_FP16
152 : return sizeof(_FP16);
153 : #else
154 : return 2;
155 : #endif
156 : case TensorDim::DataType::FP32:
157 : return sizeof(float);
158 : case TensorDim::DataType::UINT4:
159 : return sizeof(uint8_t);
160 : case TensorDim::DataType::UINT8:
161 : return sizeof(uint8_t);
162 : case TensorDim::DataType::UINT16:
163 : return sizeof(uint16_t);
164 : case TensorDim::DataType::UINT32:
165 : return sizeof(uint32_t);
166 : case TensorDim::DataType::Q4_K:
167 : return sizeof(uint8_t);
168 : case TensorDim::DataType::QINT16:
169 : return sizeof(int16_t);
170 : case TensorDim::DataType::QINT8:
171 : return sizeof(int8_t);
172 : case TensorDim::DataType::QINT4:
173 : return sizeof(int8_t);
174 : case TensorDim::DataType::BCQ:
175 : return sizeof(uint32_t);
176 : case TensorDim::DataType::Q6_K:
177 : return sizeof(uint8_t);
178 : case TensorDim::DataType::Q4_0:
179 : return sizeof(uint8_t);
180 : default:
181 : return sizeof(float);
182 : }
183 : }
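
A hedged note on this helper: for the plain element types the per-element size times the element count gives the raw buffer size, while the packed and block-quantized types (UINT4, Q4_K, Q6_K, Q4_0, BCQ) only report the size of their underlying storage unit here, so their real buffer size depends on packing handled elsewhere. A sketch for an unpacked tensor, using the overload defined above:

  ml::train::TensorDim d(1, 3, 224, 224,
                         ml::train::TensorDim::Format::NCHW,
                         ml::train::TensorDim::DataType::FP32);
  size_t bytes = d.getDataLen() * d.getDataTypeSize();   // 150528 elements * 4 bytes
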
184 :
185 2303333 : void TensorDim::resetLen() {
186 2303333 : feature_len = dim[1] * dim[2] * dim[3];
187 2303333 : len = dim[0] * feature_len;
188 2303333 : }
189 :
190 714577 : const size_t TensorDim::getTensorDim(unsigned int idx) const {
191 714577 : if (idx >= MAXDIM)
192 : throw std::invalid_argument(
193 0 : "[TensorDim] Tensor Dimension index should be between 0 and 4");
194 :
195 714577 : return dim[idx];
196 : }
197 :
198 2303342 : void TensorDim::setTensorDim(unsigned int idx, size_t value) {
199 2303342 : if (idx >= MAXDIM)
200 : throw std::out_of_range(
201 0 : "[TensorDim] Tensor Dimension index should be between 0 and 4");
202 :
203 2303342 : if (value <= 0)
204 : throw std::invalid_argument(
205 9 : "[TensorDim] Trying to assign value <=0 to tensor dim");
206 :
207 2303333 : if (len == 0) {
208 1312250 : for (size_t i = 0; i < MAXDIM; ++i) {
209 1049800 : dim[i] = 1;
210 : }
211 : }
212 :
213 2303333 : dim[idx] = value;
214 2303333 : resetLen();
215 2303333 : }
216 :
217 280 : int TensorDim::setTensorDim(const std::string &input_shape,
218 : TensorType t_type_) {
219 : int status = ML_ERROR_NONE;
220 280 : static const std::regex words_regex("[^\\s.,:;!?]+");
221 : auto words_begin =
222 280 : std::sregex_iterator(input_shape.begin(), input_shape.end(), words_regex);
223 280 : auto words_end = std::sregex_iterator();
224 280 : int cur_dim = std::distance(words_begin, words_end);
225 280 : if (cur_dim <= 0 || (size_t)cur_dim > MAXDIM) {
226 1 : ml_loge("Tensor Dimension should be between 1 and 4");
227 1 : return ML_ERROR_INVALID_PARAMETER;
228 : }
229 : int cn = 0;
230 1239 : for (std::sregex_iterator i = words_begin; i != words_end; ++i, ++cn) {
231 2880 : setTensorDim(MAXDIM - cur_dim + cn, std::stoul((*i).str()));
232 : }
233 279 : t_type = t_type_;
234 279 : return status;
235 : }
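
A usage sketch of the string form (illustrative shapes, same assumptions as the earlier sketches): tokens are right-aligned exactly like the initializer-list constructor, and token counts outside 1..4 are rejected.

  ml::train::TensorDim d("2:3:4:5");   // batch:channel:height:width -> 2:3:4:5
  ml::train::TensorDim e("4:5");       // right-aligned -> 1:1:4:5
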
236 :
237 658 : void TensorDim::setEffDimFlag(const std::bitset<MAXDIM> &dim_flag_) {
238 658 : eff_dim_flag = dim_flag_;
239 658 : }
240 :
241 2516 : void TensorDim::setDynDimFlag(const std::bitset<MAXDIM> &dim_flag_) {
242 2516 : dyn_dim_flag = dim_flag_;
243 2516 : }
244 :
245 2 : const std::bitset<TensorDim::MAXDIM> &TensorDim::getEffDimFlag() const {
246 2 : return eff_dim_flag;
247 : }
248 :
249 2 : const std::bitset<TensorDim::MAXDIM> &TensorDim::getDynDimFlag() const {
250 2 : return dyn_dim_flag;
251 : }
252 :
253 1284026 : void swap(TensorDim &lhs, TensorDim &rhs) noexcept {
254 1284026 : std::swap_ranges(std::begin(lhs.dim), std::begin(lhs.dim) + TensorDim::MAXDIM,
255 1284026 : std::begin(rhs.dim));
256 : std::swap(lhs.len, rhs.len);
257 : std::swap(lhs.feature_len, rhs.feature_len);
258 : std::swap(lhs.eff_dim_flag, rhs.eff_dim_flag);
259 : std::swap(lhs.dyn_dim_flag, rhs.dyn_dim_flag);
260 : std::swap(lhs.t_type, rhs.t_type);
261 1284026 : }
262 :
263 6856627 : size_t TensorDim::batch() const { return dim[0]; };
264 :
265 6456597 : size_t TensorDim::channel() const { return dim[1]; };
266 :
267 6655630 : size_t TensorDim::height() const { return dim[2]; };
268 :
269 6648329 : size_t TensorDim::width() const { return dim[3]; };
270 :
271 27208287 : size_t TensorDim::getDataLen() const { return len; };
272 :
273 266012 : size_t TensorDim::getFeatureLen() const { return feature_len; };
274 :
275 664812 : void TensorDim::batch(size_t b) { setTensorDim(0, b); }
276 :
277 114293 : void TensorDim::channel(size_t c) { setTensorDim(1, c); }
278 :
279 114382 : void TensorDim::height(size_t h) { setTensorDim(2, h); }
280 :
281 115942 : void TensorDim::width(size_t w) { setTensorDim(3, w); }
282 :
283 126695 : const size_t *TensorDim::getDim() const { return dim; }
284 :
285 1831 : unsigned int TensorDim::getNumDim() { return MAXDIM; }
286 :
287 1118 : TensorDim TensorDim::transpose(const std::string &direction) const {
288 : int dirs[MAXDIM - 1];
289 :
290 1118 : int status = nntrainer::getValues(3, direction, dirs);
291 1118 : NNTR_THROW_IF(status != ML_ERROR_NONE, std::invalid_argument)
292 : << "parsing direction failed";
293 :
294 1118 : const std::array<size_t, MAXDIM> axes{
295 1118 : {0, (size_t)dirs[0] + 1, (size_t)dirs[1] + 1, (size_t)dirs[2] + 1}};
296 :
297 1118 : return transpose(axes);
298 : }
299 :
300 1118 : TensorDim TensorDim::transpose(const std::array<size_t, MAXDIM> &axes) const {
301 1118 : TensorDim tmp(*this);
302 :
303 5590 : for (unsigned int i = 0; i < MAXDIM; ++i) {
304 4472 : tmp.setTensorDim(i, getTensorDim(axes[i]));
305 : }
306 :
307 1118 : return tmp;
308 : }
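
A sketch of the direction string accepted by transpose (illustrative values): the three indices permute channel, height and width, while the batch axis always stays in place.

  ml::train::TensorDim d(1, 2, 3, 4);
  ml::train::TensorDim t = d.transpose("1:0:2");
  // axes become {0, 2, 1, 3}, so t is 1:3:2:4 (channel and height swapped)
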
309 :
310 400799 : bool TensorDim::operator==(const TensorDim &rhs) const {
311 400799 : if (this->t_type.format != rhs.t_type.format)
312 : return false;
313 :
314 400799 : if (this->t_type.data_type != rhs.t_type.data_type)
315 : return false;
316 :
317 1872160 : for (size_t i = 0; i < MAXDIM; ++i) {
318 1539320 : if (this->dim[i] != rhs.dim[i]) {
319 : return false;
320 : }
321 : }
322 :
323 : return true;
324 : }
325 :
326 320326 : bool TensorDim::operator!=(const TensorDim &rhs) const {
327 320326 : return !(*this == rhs);
328 : }
329 :
330 0 : bool TensorDim::isEmpty() const { return len == 0; }
331 :
332 21348 : unsigned int TensorDim::rank() const {
333 : unsigned int rank = 0;
334 106740 : for (unsigned int i = 0; i < MAXDIM; i++) {
335 85392 : if (dim[i] > 1)
336 39236 : rank += 1;
337 : }
338 21348 : return rank;
339 : }
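
For example (sketch, same assumptions as above), only dimensions larger than one count toward the rank:

  ml::train::TensorDim d("3:5");   // stored as 1:1:3:5
  // d.rank() == 2
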
340 :
341 5752 : size_t &TensorDim::operator[](const unsigned int index) {
342 5752 : if (index >= MAXDIM)
343 : throw std::out_of_range(
344 0 : "[TensorDim] Tensor Dimension index should be between 0 and 4");
345 5752 : return dim[index];
346 : }
347 :
348 316293 : const size_t &TensorDim::operator[](const unsigned int index) const {
349 316293 : if (index >= MAXDIM)
350 : throw std::out_of_range(
351 0 : "[TensorDim] Tensor Dimension index should be between 0 and 4");
352 316293 : return dim[index];
353 : }
354 :
355 1471343 : std::array<size_t, TensorDim::MAXDIM> TensorDim::computeStrides() const {
356 1471343 : if (getFormat() == TensorDim::Format::NCHW) {
357 1471134 : return {dim[1] * dim[2] * dim[3], dim[2] * dim[3], dim[3], 1};
358 : } else {
359 209 : return {height() * channel() * width(), width() * channel(), channel(), 1};
360 : }
361 : }
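
A worked example of the NCHW branch (strides are expressed in elements, not bytes):

  ml::train::TensorDim d(2, 3, 4, 5,
                         ml::train::TensorDim::Format::NCHW,
                         ml::train::TensorDim::DataType::FP32);
  auto strides = d.computeStrides();   // {3*4*5, 4*5, 5, 1} == {60, 20, 5, 1}
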
362 :
363 1 : void TensorDim::reverse() { std::reverse(dim, dim + MAXDIM); }
364 :
365 78 : std::vector<int> TensorDim::getEffectiveDimension(bool dynamic) const {
366 : std::vector<int> eff_dim;
367 78 : eff_dim.reserve(eff_dim_flag.count());
368 :
369 : auto get_axis = [dynamic, this](unsigned int axis) -> int {
370 218 : if (dynamic && dyn_dim_flag[MAXDIM - axis - 1]) {
371 : return -1;
372 : }
373 :
374 192 : return dim[axis];
375 : };
376 :
377 390 : for (unsigned int i = 0; i < MAXDIM; ++i) {
378 : /// flip dim_flag so it matches the left-to-right dimension order,
379 : /// e.g. for 3:5:1:1 -> 3:5 the eff_dim_flag is set to 0b1100
380 312 : if (eff_dim_flag[MAXDIM - i - 1]) {
381 218 : eff_dim.push_back(get_axis(i));
382 : }
383 : }
384 :
385 78 : return eff_dim;
386 0 : }
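
A sketch following the comment above: with shape 3:5:1:1 and eff_dim_flag 0b1100 only the first two axes are reported, and an axis marked in dyn_dim_flag is replaced by -1 when the dynamic lookup is requested.

  ml::train::TensorDim d(3, 5, 1, 1);
  d.setEffDimFlag(0b1100);
  d.setDynDimFlag(0b1000);                          // batch axis is dynamic
  auto fixed   = d.getEffectiveDimension(false);    // {3, 5}
  auto dynamic = d.getEffectiveDimension(true);     // {-1, 5}
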
387 :
388 49 : bool TensorDim::is_dynamic() const { return dyn_dim_flag.any(); }
389 :
390 966 : std::ostream &operator<<(std::ostream &out, TensorDim const &d) {
391 :
392 : std::string type_;
393 : if (d.getDataType() == ml::train::TensorDim::DataType::FP32) {
394 : type_ = "FP32";
395 : } else if (d.getDataType() == ml::train::TensorDim::DataType::FP16) {
396 : type_ = "FP16";
397 : } else if (d.getDataType() == ml::train::TensorDim::DataType::UINT4) {
398 : type_ = "UINT4";
399 : } else if (d.getDataType() == ml::train::TensorDim::DataType::UINT8) {
400 : type_ = "UINT8";
401 : } else if (d.getDataType() == ml::train::TensorDim::DataType::UINT16) {
402 : type_ = "UINT16";
403 : } else if (d.getDataType() == ml::train::TensorDim::DataType::UINT32) {
404 : type_ = "UINT32";
405 : } else if (d.getDataType() == ml::train::TensorDim::DataType::QINT16) {
406 : type_ = "QINT16";
407 : } else if (d.getDataType() == ml::train::TensorDim::DataType::QINT8) {
408 : type_ = "QINT8";
409 : } else if (d.getDataType() == ml::train::TensorDim::DataType::QINT4) {
410 : type_ = "QINT4";
411 : } else if (d.getDataType() == ml::train::TensorDim::DataType::BCQ) {
412 : type_ = "BCQ";
413 : } else if (d.getDataType() == ml::train::TensorDim::DataType::Q4_K) {
414 : type_ = "Q4_K";
415 : } else if (d.getDataType() == ml::train::TensorDim::DataType::Q6_K) {
416 : type_ = "Q6_K";
417 : } else if (d.getDataType() == ml::train::TensorDim::DataType::Q4_0) {
418 : type_ = "Q4_0";
419 : } else {
420 : type_ = "Unknown";
421 : }
422 :
423 : std::string format_ =
424 966 : (d.getFormat() == ml::train::TensorDim::Format::NCHW) ? "NCHW" : "NHWC";
425 2898 : out << "Shape: " << d.batch() << ":" << d.channel() << ":" << d.height()
426 966 : << ":" << d.width() << " [ " << type_ << " : " << format_ << " ]"
427 : << std::endl;
428 966 : return out;
429 : }
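
A sketch of the resulting log line (illustrative; additionally assumes <iostream> is included):

  ml::train::TensorDim d(1, 2, 3, 4,
                         ml::train::TensorDim::Format::NCHW,
                         ml::train::TensorDim::DataType::FP32);
  std::cout << d;   // prints: Shape: 1:2:3:4 [ FP32 : NCHW ]
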
430 :
431 : } /* namespace train */
432 : } /* namespace ml */