Line data Source code
1 : // SPDX-License-Identifier: Apache-2.0
2 : /**
3 : * @file tensor_base.cpp
4 : * @date 04 December 2023
5 : * @brief This is the Tensor base class
6 : * @see https://github.com/nnstreamer/nntrainer
7 : * @author Jijoong Moon <jijoong.moon@samsung.com>
8 : * @author Donghyeon Jeong <dhyeon.jeong@samsung.com>
9 : * @bug No known bugs except for NYI items
10 : */
11 :
12 : #include <tensor.h>
13 : #include <tensor_base.h>
14 :
15 : namespace nntrainer {
16 :
17 375714 : TensorBase::TensorBase(const TensorDim &d, bool alloc_now, Initializer init,
18 375714 : std::string name_) :
19 375714 : TensorBase(name_, d.getFormat()) {
20 375714 : if (d.getDataLen() != 0) {
21 375710 : dim = d;
22 375710 : strides = d.computeStrides();
23 375710 : initializer = init;
24 : }
25 375714 : }
26 :
27 10826 : bool TensorBase::operator==(const TensorBase &rhs) const {
28 10826 : if (this->dim != rhs.dim)
29 : return false;
30 :
31 10824 : if (size() != rhs.size())
32 : return false;
33 :
34 10824 : if (contiguous != rhs.contiguous)
35 : return false;
36 :
37 10824 : if (strides != rhs.strides)
38 : return false;
39 :
40 : return true;
41 : }
42 :
43 46136 : void TensorBase::setTensorVar(TensorDim d, void *buf, size_t offset) {
44 46136 : dim = d;
45 46136 : strides = d.computeStrides();
46 : /// Tensor does not own the memory
47 46136 : data = std::make_shared<MemoryData>(buf);
48 46136 : this->offset = offset;
49 46136 : }
50 :
51 2129 : void TensorBase::save(std::ostream &file) {
52 2129 : std::streamsize sz = static_cast<std::streamsize>(bytes());
53 2129 : NNTR_THROW_IF(sz < 0, std::invalid_argument)
54 0 : << "save size: " << bytes()
55 : << " is too big. It cannot be represented by std::streamsize";
56 :
57 2129 : checkedWrite(file, (char *)getData(), sz, "[Tensor::save] operation failed");
58 2129 : putData();
59 2129 : }
60 :
61 23193 : void TensorBase::read(std::ifstream &file, size_t start_offset,
62 : bool read_from_offset) {
63 23193 : if (start_offset == std::numeric_limits<size_t>::max()) {
64 0 : start_offset = file_offset;
65 : }
66 23193 : std::streamsize sz = static_cast<std::streamsize>(bytes());
67 :
68 23193 : NNTR_THROW_IF(sz < 0, std::invalid_argument)
69 0 : << "read size: " << bytes()
70 : << " is too big. It cannot be represented by std::streamsize";
71 :
72 23193 : checkedRead(file, (char *)getData(), sz, "[Tensor::read] operation failed",
73 : start_offset, read_from_offset);
74 23193 : putData();
75 23193 : }
76 :
77 0 : void TensorBase::read(ReadSource src, size_t start_offset,
78 : bool read_from_offset) {
79 0 : if (start_offset == std::numeric_limits<size_t>::max()) {
80 0 : start_offset = file_offset;
81 : }
82 0 : std::streamsize sz = static_cast<std::streamsize>(bytes());
83 :
84 0 : NNTR_THROW_IF(sz < 0, std::invalid_argument)
85 0 : << "read size: " << bytes()
86 : << " is too big. It cannot be represented by std::streamsize";
87 :
88 0 : checkedRead(src, (char *)getData(), sz, "[Tensor::read] operation failed",
89 : start_offset, read_from_offset);
90 0 : putData();
91 0 : }
92 :
93 0 : void TensorBase::readFSU() {}
94 :
95 386074 : void TensorBase::putData() const {
96 386074 : if (!data)
97 : return;
98 :
99 : data->invalidate();
100 : }
101 :
102 138468 : void TensorBase::setMemoryData(const std::shared_ptr<MemoryData> buf,
103 : size_t off) {
104 138468 : if (buf) {
105 : data = buf;
106 51633 : offset = off;
107 : } else {
108 : data = nullptr;
109 86835 : offset = 0;
110 : }
111 138468 : }
112 :
113 34174 : const std::shared_ptr<MemoryData> TensorBase::getMemoryData() const {
114 34174 : return data;
115 : }
116 :
117 34173 : size_t TensorBase::getOffset() const { return offset; }
118 :
119 0 : size_t TensorBase::getFileOffset() const { return file_offset; }
120 :
121 0 : void TensorBase::setFileOffset(size_t off) { file_offset = off; }
122 :
123 113691 : void TensorBase::reshape(const TensorDim &d) {
124 113691 : NNTR_THROW_IF(!contiguous, std::invalid_argument)
125 : << getName() << " is not contiguous, cannot reshape.";
126 :
127 113691 : NNTR_THROW_IF(d.getDataLen() != dim.getDataLen(), std::invalid_argument)
128 : << "[Tensor]: reshape cannot change the buffer size, trying reshaping "
129 : "\nfrom "
130 3 : << getDim() << " to " << d;
131 :
132 113688 : dim.batch(d.batch());
133 113688 : dim.channel(d.channel());
134 113688 : dim.height(d.height());
135 113688 : dim.width(d.width());
136 :
137 113688 : strides = d.computeStrides();
138 113688 : }
139 :
140 20419 : void TensorBase::updateBatch(unsigned int batch) {
141 20419 : if (dim.batch() == batch) {
142 : return;
143 : }
144 :
145 17366 : if (isAllocated())
146 1 : throw std::invalid_argument("Cannot update batch for an allocated tensor");
147 17365 : dim.batch(batch);
148 : }
149 :
150 0 : void TensorBase::updateDimension(TensorDim dimension) {
151 0 : if (dim == dimension) {
152 : return;
153 : }
154 :
155 0 : if (isAllocated())
156 : throw std::invalid_argument(
157 0 : "Cannot update tensor dimension for an allocated tensor");
158 :
159 0 : dim = dimension;
160 : }
161 :
162 102898898 : size_t TensorBase::getIndex(unsigned int b, unsigned int c, unsigned int h,
163 : unsigned int w) const noexcept {
164 102898898 : if (getFormat() == Tformat::NCHW) {
165 102898892 : return (b * strides[0] + c * strides[1] + h * strides[2] + w * strides[3]);
166 : } else {
167 6 : return (b * strides[0] + h * strides[1] + w * strides[2] + c * strides[3]);
168 : }
169 : }
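/*
 * Worked example (a sketch, assuming a contiguous NCHW tensor so that
 * computeStrides() yields {C*H*W, H*W, W, 1}): for dim = 2:3:4:5,
 * getIndex(1, 2, 3, 4) = 1*60 + 2*20 + 3*5 + 4 = 119, i.e. the last of the
 * 120 elements.
 */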
170 :
171 169889 : void TensorBase::mergeAxis(unsigned int axis1, unsigned int axis2) {
172 169889 : dim.setTensorDim(axis2, dim.getTensorDim(axis1) * dim.getTensorDim(axis2));
173 169889 : dim.setTensorDim(axis1, 1);
174 169889 : }
175 :
176 301854 : void TensorBase::allocateSrcTensor() {
177 301854 : if (src_tensor) {
178 301854 : data = src_tensor->tensor()->data;
179 301854 : offset = src_tensor->tensor()->offset + src_tensor->offset();
180 : }
181 301854 : }
182 :
183 301854 : void TensorBase::createSharedDataTensor(const TensorBase *src, TensorBase *dest,
184 : size_t offset) const {
185 : /**
186 : * - If src already has data allocated, then directly make dest tensor based
187 : * on the src tensor.
188 : * - If src->data does not exist (meaning the tensor has no memory allocated),
189 : * and src->src_tensor does not exist (meaning the src tensor does not depend
190 : * on another tensor), then create a SrcSharedTensor around the src.
191 : * - If src->src_tensor exists, then use the src->src_tensor to create the
192 : * required SrcSharedTensor to avoid recursive dependency.
193 : *
194 : * @note src->data and src->src_tensor CAN co-exist. src->src_tensor is stored
195 : * if the batch size of src is updated and needs reallocation.
196 : */
197 : dest->data = nullptr;
198 301854 : if (src->data) {
199 301854 : dest->src_tensor = std::make_shared<SrcSharedTensorBase>(src, offset);
200 301854 : dest->allocate();
201 0 : } else if (!src->src_tensor)
202 0 : dest->src_tensor = std::make_shared<SrcSharedTensorBase>(src, offset);
203 : else
204 0 : dest->src_tensor = std::make_shared<SrcSharedTensorBase>(
205 0 : src->src_tensor->tensor(), offset + src->src_tensor->offset());
206 301854 : }
207 :
208 301856 : void TensorBase::getSharedDataTensor(const TensorDim dim_, size_t offset,
209 : bool reset_stride,
210 : const std::string &name_,
211 : TensorBase *ret) {
212 301856 : if (dim_.getFormat() != ret->dim.getFormat())
213 0 : throw std::invalid_argument("Tensor format does not match");
214 :
215 301856 : ret->dim = dim_;
216 301856 : if (!name_.empty())
217 8176 : ret->name = name_;
218 :
219 301856 : if (dim_.getDataLen() + offset > dim.getDataLen())
220 : throw std::invalid_argument(
221 2 : "Creating shared tensor of size bigger than tensor memory.");
222 :
223 301854 : if (reset_stride)
224 277433 : ret->strides = ret->dim.computeStrides();
225 :
226 301854 : TensorDim new_match_dim = dim_;
227 301854 : new_match_dim.batch(dim.batch());
228 301854 : if (new_match_dim != dim && !reset_stride)
229 16245 : ret->contiguous = false;
230 :
231 : /**
232 : * In this case, it is the caller's responsibility to ensure that allocate()
233 : * is called for the output tensor before operating on it (usage sketch below).
234 : */
235 301854 : createSharedDataTensor(this, ret, offset);
236 301854 : }
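/*
 * Usage sketch only (assuming the public Tensor wrapper exposes
 * getSharedDataTensor(), allocate() and setValue() with the semantics wrapped
 * here): when the source tensor is not yet allocated, the shared view must be
 * allocated by the caller before it is used.
 *
 *   Tensor src(TensorDim(2, 3, 4, 4), false);  // no memory allocated yet
 *   Tensor view = src.getSharedDataTensor(TensorDim(1, 3, 4, 4), 0);
 *   view.allocate();                           // caller's responsibility
 *   view.setValue(1.0f);                       // now safe to operate on view
 */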
237 :
238 : TensorBase::BroadcastInfo
239 10829 : TensorBase::computeBroadcastInfo(const Tensor &m) const {
240 10829 : if (m.size() > this->size())
241 30 : throw exception::not_supported("broadcasting *this is not supported");
242 :
243 10814 : const TensorDim m_dim = m.getDim();
244 :
245 : BroadcastInfo e;
246 10814 : e.tensor_type = getTensorType();
247 :
248 10814 : unsigned int continuity[4] = {0, 1, 2, 3};
249 10814 : if (getFormat() == Tformat::NHWC) {
250 10 : continuity[1] = 2;
251 10 : continuity[2] = 3;
252 10 : continuity[3] = 1;
253 : }
254 :
255 : /// check whether the given Tensor can be broadcast
256 53861 : for (unsigned int i = 0; i < TensorDim::MAXDIM; ++i) {
257 43198 : if (dim.getTensorDim(continuity[i]) == m_dim.getTensorDim(continuity[i])) {
258 29597 : e.strides[i] = m.getStrides()[i];
259 29597 : continue;
260 : }
261 :
262 : /// If the corresponding dimension of m is 1, its elements can be reused and
263 : /// the stride stays 0. The case dim[i] == 1 && m_dim[i] == 1 is handled by
264 : /// the equality check above first, so the stride is not overwritten here.
265 13601 : if (m_dim.getTensorDim(continuity[i]) == 1) {
266 13450 : continue;
267 : }
268 :
269 151 : std::stringstream ss;
270 : ss << "[computeBroadcastInfo] broadcasting only allowed for "
271 : "dimension value of 1 \n"
272 302 : << "this: " << dim << "target: " << m_dim;
273 453 : throw std::invalid_argument(ss.str().c_str());
274 151 : }
275 :
276 : /// calculate inner loop size
277 10663 : e.buffer_size = 1;
278 : e.buffer_axis = -1;
279 10663 : e.strides[3] = m.getStrides()[3];
280 :
281 : /// initialize buffer info with the matching-dimension strategy
282 34462 : for (int axis = 3; axis >= 0; --axis) {
283 68924 : if (dim.getTensorDim(continuity[axis]) !=
284 34462 : m_dim.getTensorDim(continuity[axis])) {
285 10663 : e.buffer_axis = axis;
286 10663 : break;
287 : }
288 :
289 23799 : e.buffer_size *= dim.getTensorDim(continuity[axis]);
290 : }
291 :
292 : /// check the strategy that uses consecutive trailing ones
293 10663 : if (m_dim.getTensorDim(continuity[3]) == 1) {
294 : unsigned int inner_loop_size = 1;
295 : int axis;
296 31025 : for (axis = 3; axis >= 0; --axis) {
297 25643 : if (m_dim.getTensorDim(continuity[axis]) != 1) {
298 : break;
299 : }
300 :
301 23930 : inner_loop_size *= dim.getTensorDim(continuity[axis]);
302 : }
303 :
304 : /// if consecutive-one strategy has bigger chunk size, replace the
305 : /// information
306 7095 : if (inner_loop_size > e.buffer_size) {
307 7065 : e.buffer_axis = axis;
308 7065 : e.buffer_size = inner_loop_size;
309 7065 : e.strides[3] = 0;
310 : }
311 : }
312 :
313 10663 : return e;
314 : }
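/*
 * Worked example (a sketch, assuming contiguous NCHW tensors and that the
 * strides of BroadcastInfo start out as 0): broadcasting m of dim 1:1:1:5
 * onto *this of dim 2:3:4:5 takes the equality branch only on the last axis,
 * so e.strides ends up {0, 0, 0, 1}, e.buffer_axis = 2 and e.buffer_size = 5;
 * one row of m is reused for every (b, c, h). For m of dim 2:3:1:1 the
 * consecutive-ones strategy wins instead: e.buffer_axis = 1,
 * e.buffer_size = 4 * 5 = 20 and e.strides[3] = 0, so a single scalar of m
 * covers each 4x5 feature map.
 */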
315 :
316 36824 : void TensorBase::calculateFlattenDot(
317 : Tensor const &input, Tensor &output, bool trans, bool trans_in,
318 : unsigned int &first_three_flat, unsigned int &last_axis,
319 : unsigned int &input_first_three_flat, unsigned int &input_last_axis,
320 : unsigned int &M, unsigned int &N, unsigned int &K, unsigned int &lda,
321 : unsigned int &ldb, unsigned int &ldc) const {
322 :
323 36824 : if (trans && dim.rank() > 2) {
324 932 : ml_logw("Warning: support only for rank of dot matrix <= 2 with trans");
325 : }
326 :
327 36824 : if (getFormat() == Tformat::NHWC) {
328 15 : first_three_flat = batch() * height() * width();
329 15 : last_axis = channel();
330 15 : input_first_three_flat = input.batch() * input.height() * input.width();
331 15 : input_last_axis = input.channel();
332 : } else {
333 36809 : first_three_flat = batch() * channel() * height();
334 36809 : last_axis = width();
335 36809 : input_first_three_flat = input.batch() * input.channel() * input.height();
336 36809 : input_last_axis = input.width();
337 : }
338 :
339 36824 : if (!trans && !trans_in) {
340 17794 : if (last_axis != input_first_three_flat)
341 : throw std::runtime_error(
342 2 : "Error: incompatible dimensions for dot product");
343 17792 : K = input_first_three_flat; /** == last_axis */
344 17792 : N = input_last_axis;
345 17792 : M = first_three_flat;
346 17792 : if (getFormat() == Tformat::NHWC) {
347 15 : CREATE_IF_EMPTY_DIMS(output, batch(), N, height(), width(),
348 : getTensorType()); // NHWC Result Tensor
349 : } else {
350 21391 : CREATE_IF_EMPTY_DIMS(output, batch(), channel(), height(), N,
351 : getTensorType());
352 : }
353 :
354 : // We do not zero-initialize the output for performance reasons, so the
355 : // output may contain garbage values such as NaN. When the result is used
356 : // as C in C = alpha*A*B + beta*C, the caller has to make sure that the
357 : // uninitialized contents of C do not affect the result.
358 :
359 19030 : } else if (!trans && trans_in) {
360 8356 : if (last_axis != input_last_axis)
361 : throw std::runtime_error(
362 0 : "Error: incompatible dimensions for dot product");
363 8356 : K = input_last_axis; /** == last_axis */
364 8356 : N = input_first_three_flat;
365 8356 : M = first_three_flat;
366 8356 : if (getFormat() == Tformat::NHWC) {
367 0 : CREATE_IF_EMPTY_DIMS(output, batch(), N, height(), width(),
368 : getTensorType());
369 : } else {
370 9232 : CREATE_IF_EMPTY_DIMS(output, batch(), channel(), height(), N,
371 : getTensorType());
372 : }
373 10674 : } else if (trans && !trans_in) {
374 10664 : if (first_three_flat != input_first_three_flat)
375 : throw std::runtime_error(
376 1 : "Error: incompatible dimensions for dot product");
377 10663 : K = input_first_three_flat; /** == first_three_flat */
378 10663 : N = input_last_axis;
379 10663 : M = last_axis;
380 10663 : if (getFormat() == Tformat::NHWC) {
381 0 : CREATE_IF_EMPTY_DIMS(output, 1, N, M, 1, getTensorType());
382 : } else {
383 11517 : CREATE_IF_EMPTY_DIMS(output, 1, 1, M, N, getTensorType());
384 : }
385 : } else {
386 10 : if (first_three_flat != input_last_axis)
387 : throw std::runtime_error(
388 0 : "Error: incompatible dimensions for dot product");
389 10 : K = input_last_axis; /** == first_three_flat */
390 10 : N = input_first_three_flat;
391 10 : M = last_axis;
392 10 : if (getFormat() == Tformat::NHWC) {
393 0 : CREATE_IF_EMPTY_DIMS(output, 1, N, M, 1, getTensorType());
394 : } else {
395 20 : CREATE_IF_EMPTY_DIMS(output, 1, 1, M, N, getTensorType());
396 : }
397 : }
398 :
399 36821 : lda = last_axis;
400 36821 : ldb = input_last_axis;
401 36821 : ldc = (getFormat() == Tformat::NHWC) ? output.channel() : output.width();
402 36821 : }
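/*
 * Illustrative numbers for the no-transpose NCHW case (a sketch): with *this
 * of dim 4:2:3:8 and input of dim 1:1:8:16, the first three axes are
 * flattened, so M = 4*2*3 = 24, K = 8 and N = 16; the output is created as
 * 4:2:3:16, giving lda = 8, ldb = 16 and ldc = 16.
 */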
403 :
404 : /**
405 : * Please note that the following functions need to be implemented in a child
406 : * class to fully support tensor operations such as addition, division,
407 : * multiplication, dot product, data averaging, and so on.
408 : */
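/*
 * Illustrative sketch only (not part of this translation unit): a concrete
 * tensor class, here a hypothetical DoubleTensor, derives from TensorBase and
 * overrides the operations it supports; anything left un-overridden falls back
 * to the throwing defaults below. getData(), size(), ML_ERROR_NONE and the
 * virtual declarations are assumed to match tensor_base.h.
 *
 *   class DoubleTensor : public TensorBase {
 *   public:
 *     int multiply_i(float const &value) override {
 *       double *buf = (double *)getData();   // raw buffer of this tensor
 *       for (size_t i = 0; i < size(); ++i)
 *         buf[i] *= value;                   // in-place element-wise multiply
 *       return ML_ERROR_NONE;
 *     }
 *   };
 */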
409 0 : void TensorBase::setRandNormal(float mean, float stddev) {
410 : throw std::invalid_argument(
411 0 : "Tensor::setRandNormal() is currently not supported in tensor data type " +
412 0 : getStringDataType());
413 : }
414 :
415 0 : void TensorBase::setRandUniform(float min, float max) {
416 : throw std::invalid_argument(
417 0 : "Tensor::setRandUniform() is currently not supported in tensor data type " +
418 0 : getStringDataType());
419 : }
420 :
421 0 : void TensorBase::setRandBernoulli(float probability) {
422 : throw std::invalid_argument("Tensor::setRandBernoulli() is currently not "
423 0 : "supported in tensor data type " +
424 0 : getStringDataType());
425 : }
426 :
427 0 : Tensor TensorBase::multiply_strided(Tensor const &m, Tensor &output,
428 : const float beta) const {
429 : throw std::invalid_argument("Tensor::multiply_strided() is currently not "
430 0 : "supported in tensor data type " +
431 0 : getStringDataType());
432 : }
433 :
434 0 : int TensorBase::multiply_i(float const &value) {
435 : throw std::invalid_argument(
436 0 : "Tensor::multiply_i() is currently not supported in tensor data type " +
437 0 : getStringDataType());
438 : }
439 :
440 0 : Tensor &TensorBase::multiply(float const &value, Tensor &output) const {
441 : throw std::invalid_argument(
442 0 : "Tensor::multiply() is currently not supported in tensor data type " +
443 0 : getStringDataType());
444 : }
445 :
446 0 : Tensor &TensorBase::multiply(Tensor const &m, Tensor &output,
447 : const float beta) const {
448 : throw std::invalid_argument(
449 0 : "Tensor::multiply() is currently not supported in tensor data type " +
450 0 : getStringDataType());
451 : }
452 :
453 0 : Tensor &TensorBase::divide(float const &value, Tensor &output) const {
454 : throw std::invalid_argument(
455 0 : "Tensor::divide() is currently not supported in tensor data type " +
456 0 : getStringDataType());
457 : }
458 :
459 0 : Tensor &TensorBase::divide(Tensor const &m, Tensor &output) const {
460 : throw std::invalid_argument(
461 0 : "Tensor::divide() is currently not supported in tensor data type " +
462 0 : getStringDataType());
463 : }
464 :
465 0 : Tensor &TensorBase::add_strided(Tensor const &input, Tensor &output,
466 : const float beta) const {
467 : throw std::invalid_argument(
468 0 : "Tensor::add_strided() is currently not supported in tensor data type " +
469 0 : getStringDataType());
470 : }
471 :
472 0 : int TensorBase::add_i_partial(unsigned int len, unsigned int addr_idx,
473 : Tensor &m, unsigned int incX, unsigned int incY,
474 : const Tensor alphas, unsigned int alpha_idx) {
475 : throw std::invalid_argument(
476 0 : "Tensor::add_i_partial() is currently not supported in tensor data type " +
477 0 : getStringDataType());
478 : }
479 :
480 0 : Tensor &TensorBase::add(float const &value, Tensor &output) const {
481 : throw std::invalid_argument(
482 0 : "Tensor::add() is currently not supported in tensor data type " +
483 0 : getStringDataType());
484 : }
485 :
486 0 : Tensor &TensorBase::add(Tensor const &m, Tensor &output,
487 : float const alpha) const {
488 : throw std::invalid_argument(
489 0 : "Tensor::add() is currently not supported in tensor data type " +
490 0 : getStringDataType());
491 : }
492 :
493 0 : Tensor &TensorBase::subtract(float const &value, Tensor &output) const {
494 : throw std::invalid_argument(
495 0 : "Tensor::subtract() is currently not supported in tensor data type " +
496 0 : getStringDataType());
497 : }
498 :
499 0 : void TensorBase::sum_by_batch(Tensor &output) const {
500 : throw std::invalid_argument(
501 0 : "Tensor::sum_by_batch() is currently not supported in tensor data type " +
502 0 : getStringDataType());
503 : }
504 :
505 0 : Tensor &TensorBase::sum(unsigned int axis, Tensor &output, float alpha,
506 : float beta) const {
507 : throw std::invalid_argument(
508 0 : "Tensor::sum() is currently not supported in tensor data type " +
509 0 : getStringDataType());
510 : }
511 :
512 0 : Tensor &TensorBase::abs(Tensor &output) const {
513 : throw std::invalid_argument(
514 0 : "Tensor::abs() is currently not supported in tensor data type " +
515 0 : getStringDataType());
516 : }
517 :
518 0 : float TensorBase::l2norm() const {
519 : throw std::invalid_argument(
520 0 : "Tensor::l2norm() is currently not supported in tensor data type " +
521 0 : getStringDataType());
522 : }
523 :
524 0 : Tensor &TensorBase::pow(float exponent, Tensor &output) const {
525 : throw std::invalid_argument(
526 0 : "Tensor::pow() is currently not supported in tensor data type " +
527 0 : getStringDataType());
528 : }
529 :
530 0 : Tensor &TensorBase::sqrt(Tensor &output) const {
531 : throw std::invalid_argument(
532 0 : "Tensor::sqrt() is currently not supported in tensor data type " +
533 0 : getStringDataType());
534 : }
535 :
536 0 : Tensor &TensorBase::erf(Tensor &output) const {
537 : throw std::invalid_argument(
538 0 : "Tensor::erf() is currently not supported in tensor data type " +
539 0 : getStringDataType());
540 : }
541 :
542 0 : void TensorBase::sin(Tensor &out, float alpha) {
543 : throw std::invalid_argument(
544 0 : "Tensor::sin() is currently not supported in tensor data type " +
545 0 : getStringDataType());
546 : }
547 :
548 0 : void TensorBase::cos(Tensor &out, float alpha) {
549 : throw std::invalid_argument(
550 0 : "Tensor::cos() is currently not supported in tensor data type " +
551 0 : getStringDataType());
552 : }
553 :
554 0 : void TensorBase::tan(Tensor &output, float alpha) {
555 : throw std::invalid_argument(
556 0 : "Tensor::tan() is currently not supported in tensor data type " +
557 0 : getStringDataType());
558 : }
559 :
560 0 : void TensorBase::inv_sqrt(Tensor &out) {
561 : throw std::invalid_argument(
562 0 : "Tensor::inv_sqrt() is currently not supported in tensor data type " +
563 0 : getStringDataType());
564 : }
565 :
566 0 : Tensor &TensorBase::dot(Tensor const &input, Tensor &output, bool trans,
567 : bool trans_in, float beta) const {
568 : throw std::invalid_argument(
569 0 : "Tensor::dot() is currently not supported in tensor data type " +
570 0 : getStringDataType());
571 : }
572 :
573 0 : void TensorBase::dot(std::vector<Tensor *> input, std::vector<Tensor *> output,
574 : bool trans, bool trans_in, float beta) const {
575 : throw std::invalid_argument("Tensor::dot(std::vector<Tensor*>) is currently "
576 0 : "not supported in tensor data type " +
577 0 : getStringDataType());
578 : }
579 :
580 0 : void TensorBase::dropout_mask(float dropout) {
581 : throw std::invalid_argument(
582 0 : "Tensor::dropout_mask() is currently not supported in tensor data type " +
583 0 : getStringDataType());
584 : }
585 :
586 0 : void TensorBase::filter_mask(const Tensor &mask_len, bool reverse) {
587 : throw std::invalid_argument(
588 0 : "Tensor::filter_mask() is currently not supported in tensor data type " +
589 0 : getStringDataType());
590 : }
591 :
592 0 : void TensorBase::zoneout_mask(Tensor &opposite, float zoneout) {
593 : throw std::invalid_argument(
594 0 : "Tensor::zoneout_mask() is currently not supported in tensor data type " +
595 0 : getStringDataType());
596 : }
597 :
598 0 : std::vector<Tensor> TensorBase::split(std::vector<size_t> sizes, int axis) {
599 : throw std::invalid_argument(
600 0 : "Tensor::split() is currently not supported in tensor data type " +
601 0 : getStringDataType());
602 : }
603 :
604 0 : Tensor TensorBase::concat(const std::vector<Tensor> &tensors, int axis,
605 : Tensor &output) {
606 : throw std::invalid_argument(
607 0 : "Tensor::concat() is currently not supported in tensor data type " +
608 0 : getStringDataType());
609 : }
610 :
611 0 : Tensor &TensorBase::apply(std::function<float(float)> f, Tensor &output) const {
612 : throw std::invalid_argument(
613 : "Tensor::apply(std::function<float(float)> f, Tensor &output) is "
614 0 : "not supported in tensor data type " +
615 0 : getStringDataType());
616 : }
617 :
618 : #ifdef ENABLE_FP16
619 : Tensor &TensorBase::apply(std::function<_FP16(_FP16)> f, Tensor &output) const {
620 : throw std::invalid_argument(
621 : "Tensor::apply(std::function<_FP16(_FP16)> f, Tensor &output) is "
622 : "not supported in tensor data type " +
623 : getStringDataType());
624 : }
625 : #endif
626 :
627 0 : std::vector<unsigned int> TensorBase::argmax() const {
628 : throw std::invalid_argument(
629 0 : "Tensor::argmax() is currently not supported in tensor data type " +
630 0 : getStringDataType());
631 : }
632 :
633 0 : std::vector<unsigned int> TensorBase::argmin() const {
634 : throw std::invalid_argument(
635 0 : "Tensor::argmin() is currently not supported in tensor data type " +
636 0 : getStringDataType());
637 : }
638 :
639 1 : void TensorBase::topK(unsigned int k, void *output_data, uint32_t *indices) {
640 : throw std::invalid_argument(
641 1 : "Tensor::topK() is currently not supported in tensor data type " +
642 3 : getStringDataType());
643 : }
644 :
645 0 : Tensor &TensorBase::transpose(const std::string &direction, Tensor &out) const {
646 : throw std::invalid_argument(
647 0 : "Tensor::transpose() is currently not supported in tensor data type " +
648 0 : getStringDataType());
649 : }
650 :
651 : } // namespace nntrainer