Line data Source code
1 : // SPDX-License-Identifier: Apache-2.0
2 : /**
3 : * Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
4 : *
5 : * @file network_graph.h
6 : * @date 19 Oct 2020
7 : * @see https://github.com/nnstreamer/nntrainer
8 : * @author Jijoong Moon <jijoong.moon@samsung.com>
9 : * @bug No known bugs except for NYI items
10 : * @brief This is Network Graph Class for Neural Network
11 : *
12 : */
13 :
14 : #ifndef __NETWORK_GRAPH_H__
15 : #define __NETWORK_GRAPH_H__
16 : #ifdef __cplusplus
17 :
18 : #include <list>
19 : #include <map>
20 : #include <memory>
21 : #include <stack>
22 : #include <vector>
23 :
24 : #include <graph_core.h>
25 : #include <layer_node.h>
26 : #include <manager.h>
27 :
28 : namespace nntrainer {
29 : using ExecutionMode = ml::train::ExecutionMode;
30 :
31 : class Connection;
32 : /**
33 : * @class NetworkGraph
34 : * @brief NeuralNetwork Graph Class which manages layers
35 : */
36 : class NetworkGraph {
37 : public:
38 : /**
39 : * @brief Constructor of NeuralNetwork Graph Class
40 : */
41 849 : NetworkGraph() :
42 : tensor_manager(std::make_shared<Manager>()),
43 849 : graph(),
44 849 : compiled(false),
45 849 : batch_size(0),
46 849 : graph_exec_end(0),
47 849 : backward_iter_end(nullptr),
48 849 : forward_iter_end(nullptr),
49 849 : optimize_memory(true),
50 849 : exec_mode(ExecutionMode::TRAIN),
51 849 : tensor_format("NCHW"),
52 2547 : tensor_dtype(split("FP32-FP32", getRegex("\\-"))),
53 849 : is_clip_grad(false),
54 849 : loss_scale(1.0f) {
55 849 : nan_count = 0;
56 849 : }
57 :
58 : /**
59 : * @brief Constructor of NeuralNetwork Graph Class
60 : * @param[in] enable_fsu enable memory FSU for tensors
61 : * @param[in] mode execution mode (default ExecutionMode::TRAIN)
62 : * @param[in] fsu_path FSU file path, used when FSU is enabled
63 : * @param[in] lookahead FSU lookahead window size (default 0)
64 : * @param[in] tensor_format tensor format, one of NCHW and NHWC (default NCHW)
65 : * @param[in] tensor_dtype weight and activation data types (default
66 : * FP32-FP32)
67 : */
68 694 : NetworkGraph(bool enable_fsu, ExecutionMode mode = ExecutionMode::TRAIN,
69 : const std::string &fsu_path = "", unsigned int lookahead = 0,
70 : const std::string &tensor_format_ = "NCHW",
71 694 : const std::string &tensor_dtype_ = "FP32-FP32") :
72 : tensor_manager(std::make_shared<Manager>(
73 : enable_fsu, fsu_path, lookahead, tensor_format_, tensor_dtype_, mode)),
74 694 : graph(),
75 694 : compiled(false),
76 694 : batch_size(0),
77 694 : graph_exec_end(0),
78 694 : backward_iter_end(nullptr),
79 694 : forward_iter_end(nullptr),
80 694 : optimize_memory(true),
81 694 : exec_mode(mode),
82 694 : tensor_format(tensor_format_),
83 1388 : tensor_dtype(split(tensor_dtype_, getRegex("\\-"))),
84 694 : is_clip_grad(false),
85 694 : loss_scale(1.0f) {
86 694 : nan_count = 0;
87 694 : }
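/**
 * Usage sketch (illustrative only, not part of the original header):
 * constructing graphs with and without FSU. The file path, lookahead value,
 * and dtype string below are hypothetical.
 * @code
 * NetworkGraph train_graph; // defaults: TRAIN, NCHW, FP32-FP32
 * NetworkGraph fsu_graph(true, ExecutionMode::INFERENCE,
 *                        "fsu_weights.bin", 2, "NCHW", "FP16-FP16");
 * @endcode
 */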
88 :
89 : /**
90 : * @brief Destructor of the NeuralNetwork Graph class
91 : *
92 : */
93 5112 : ~NetworkGraph() = default;
94 :
95 : /**
96 : * @brief Compile the graph
97 : * @param[in] loss_type loss type for the graph
98 : * @retval #ML_ERROR_NONE on success, error code on failure
99 : */
100 : int compile(const std::string &loss_type);
101 :
102 : /**
103 : * @brief Create new LayerNode and add into Graph
104 : * @param[in] layer shared_ptr of Layer
105 : */
106 : void addLayer(std::shared_ptr<LayerNode> layer);
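/**
 * Usage sketch (illustrative only): adding layers and compiling. The
 * createLayerNode() factory and the layer properties shown are assumptions
 * taken from typical nntrainer usage, not guaranteed by this header.
 * @code
 * NetworkGraph g;
 * g.addLayer(createLayerNode("input", {"name=in", "input_shape=1:1:4"}));
 * g.addLayer(createLayerNode("fully_connected", {"name=fc", "unit=2"}));
 * int status = g.compile("mse"); // ML_ERROR_NONE on success
 * @endcode
 */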
107 :
108 : /**
109 : * @brief get current flat graph from the model before sorting
110 : * @note graph contains pointers to the actual nodes, which are not deeply
111 : * copied.
112 : * @retval current flat graph
113 : *
114 : * @todo remove getting unsorted layers from model loader, compile model
115 : * loader
116 : */
117 : std::vector<std::shared_ptr<LayerNode>>
118 : getUnsortedLayers(const std::string &input_layer,
119 : const std::string &output_layer) const;
120 :
121 : /**
122 : * @brief getter of number of nodes
123 : * @retval number of nodes
124 : */
125 : unsigned int size() const { return graph.size(); }
126 :
127 : /**
128 : * @brief get if the graph is empty
129 : * @retval true if empty, else false
130 : */
131 : bool empty() const { return graph.empty(); }
132 :
133 : /**
134 : * @brief Swap function for the class
135 : */
136 : friend void swap(NetworkGraph &lhs, NetworkGraph &rhs) {
137 : /// @fixme this swap function need maintenance
138 : using std::swap;
139 :
140 540 : swap(lhs.graph, rhs.graph);
141 : }
142 :
143 : /**
144 : * @brief getter of Sorted LayerNode with index number
145 : * @param[in] ith index of the sorted node
146 : * @retval LayerNode
147 : */
148 20332 : std::shared_ptr<LayerNode> getSortedLayerNode(unsigned int ith) const {
149 20332 : return std::static_pointer_cast<LayerNode>(graph.getSortedNode(ith));
150 : }
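/**
 * Usage sketch (illustrative only): visiting nodes in topological order
 * after compilation.
 * @code
 * for (unsigned int i = 0; i < g.size(); ++i) {
 *   auto node = g.getSortedLayerNode(i);
 *   // inspect node->getName(), node->getType(), ... (assumed accessors)
 * }
 * @endcode
 */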
151 :
152 : /**
153 : * @brief getter of LayerNode with layer name
154 : * @param[in] layer_name name of the layer
155 : * @retval LayerNode
156 : */
157 10181 : std::shared_ptr<LayerNode> getLayerNode(const std::string &layer_name) const {
158 10181 : return std::static_pointer_cast<LayerNode>(graph.getNode(layer_name));
159 : }
160 :
161 : /**
162 : * @brief getter of all the layer nodes in the model
163 : * @retval Layer nodes
164 : * @note these layer nodes will be in sorted order if the model is compiled,
165 : * otherwise the order is the order of addition of layer nodes in the model.
166 : */
167 : std::vector<std::shared_ptr<LayerNode>> getLayerNodes() const;
168 :
169 : /**
170 : * @brief set batch size
171 : * @param[in] batch_size batch size
172 : */
173 : void setBatchSize(unsigned int batch_size);
174 :
175 : /**
176 : * @brief reset input dimensions of a model
177 : * @param[in] dims input dimensions
178 : * @note Similar to reinitialize, the resetInputDimension API is used for
179 : * modifying input dimensions after model initialization. The reinitialize
180 : * function is the official API to call when changing input
181 : * dimensions, as it properly recalculates weights, tensors, and outputs for
182 : * each layer. On the other hand, resetInputDimension is a specialized API
183 : * created to modify only specific dimensions (specifically height values)
184 : * within input/output dimensions. Since this API uniformly adjusts the height
185 : * across all model layers, developers must verify that every layer in their
186 : * model architecture can safely accommodate such height modifications.
187 : */
188 : void resetInputDimension(std::vector<TensorDim> dims);
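/**
 * Usage sketch (illustrative only): adjusting only the height of the model
 * input, per the note above. The TensorDim height() getter/setter and the
 * concrete values are assumptions.
 * @code
 * std::vector<TensorDim> dims = g.getInputDimension();
 * dims[0].height(dims[0].height() / 2); // every layer must tolerate this
 * g.resetInputDimension(dims);
 * @endcode
 */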
189 :
190 : /**
191 : * @brief try to apply gradients if possible
192 : * @note if this is not the last access to the gradient, this is a no-op
193 : * @note if the gradient is to be clipped by norm, this is a no-op
194 : *
195 : * @param node node on which to try applying gradients
196 : * @param apply_func apply function
197 : */
198 : static void applyGradients(LayerNode *node,
199 : const std::function<void(Weight &)> &apply_func);
200 :
201 : /**
202 : * @brief forwarding network graph
203 : * @param[in] training true if forwarding is for training
204 : * @retval output tensors
205 : */
206 : sharedConstTensors forwarding(
207 : bool training = false,
208 : std::function<void(std::shared_ptr<LayerNode>, bool)> forwarding_op =
209 : [](std::shared_ptr<LayerNode>, bool) {},
210 : std::function<bool(void *userdata)> stop_cb =
211 : [](void *user_data) { return false; },
212 : void *user_data = nullptr);
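/**
 * Usage sketch (illustrative only): an inference pass with an explicit
 * per-node operation and a stop callback; LayerNode::forwarding() is an
 * assumed API.
 * @code
 * sharedConstTensors outs = g.forwarding(
 *   false,
 *   [](std::shared_ptr<LayerNode> node, bool training) {
 *     node->forwarding(training);
 *   },
 *   [](void *) { return false; }); // never request an early stop
 * @endcode
 */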
213 :
214 : /**
215 : * @brief forwarding network graph
216 : * @param[in] from start step
217 : * @param[in] to end step
218 : * @param[in] training true if forwarding is for training
219 : * @retval output tensors
220 : */
221 : sharedConstTensors incremental_forwarding(
222 : unsigned int from, unsigned int to, bool training = false,
223 : std::function<void(std::shared_ptr<LayerNode>, bool)> forwarding_op =
224 : [](std::shared_ptr<LayerNode>, bool) {},
225 : std::function<bool(void *userdata)> stop_cb =
226 : [](void *user_data) { return false; },
227 : void *user_data = nullptr);
228 :
229 : /**
230 : * @brief backwarding the network graph
231 : * @param[in] iteration current iteration number
232 : * @param[in] forwarding_op operation for the forwarding
233 : * @param[in] backwarding_op operation for the backwarding
234 : * @param[in] lazy_apply_grad_op operation for applying the lazy gradients
235 : * @retval false if the gradient contains NaN values in mixed precision
236 : * training; in that case, the loss scale factor must be adjusted and the
237 : * derivatives computed again.
238 : */
239 : bool backwarding(
240 : int iteration,
241 : std::function<void(std::shared_ptr<LayerNode>, bool)> &forwarding_op,
242 : std::function<bool(std::shared_ptr<LayerNode>, int)> &backwarding_op,
243 : std::function<void(Weight &, int)> &lazy_apply_grad_op,
244 : std::function<bool(void *userdata)> stop_cb =
245 : [](void *user_data) { return false; },
246 : void *user_data = nullptr);
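/**
 * Usage sketch (illustrative only): wiring the three operations for one
 * training iteration; the lambda bodies are placeholders for the real
 * model/optimizer glue code.
 * @code
 * int iteration = 0;
 * std::function<void(std::shared_ptr<LayerNode>, bool)> fwd_op =
 *   [](std::shared_ptr<LayerNode> n, bool training) {}; // recompute if needed
 * std::function<bool(std::shared_ptr<LayerNode>, int)> bwd_op =
 *   [](std::shared_ptr<LayerNode> n, int iter) { return true; };
 * std::function<void(Weight &, int)> lazy_op =
 *   [](Weight &w, int iter) {}; // apply deferred gradients here
 * bool ok = g.backwarding(iteration, fwd_op, bwd_op, lazy_op);
 * @endcode
 */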
247 :
248 : /**
249 : * @brief get begin iterator for the graph
250 : * @retval const iterator
251 : */
252 : graph_const_iterator<LayerNode> cbegin() const {
253 : return graph.cbegin<LayerNode>();
254 : }
255 :
256 : /**
257 : * @brief get end iterator for the graph
258 : * @retval const iterator
259 : */
260 : graph_const_iterator<LayerNode> cend() const {
261 : return graph.cend<LayerNode>();
262 : }
263 :
264 : /**
265 : * @brief get reverse begin iterator for the graph
266 : * @retval const reverse iterator
267 : */
268 : graph_const_reverse_iterator<LayerNode> crbegin() const {
269 : return graph.crbegin<LayerNode>();
270 : }
271 :
272 : /**
273 : * @brief get reverse end iterator for the graph
274 : * @retval const reverse iterator
275 : */
276 : graph_const_reverse_iterator<LayerNode> crend() const {
277 : return graph.crend<LayerNode>();
278 : }
279 :
280 : /**
281 : * @brief get begin iterator for the backwarding
282 : * @retval const reverse iterator marking the beginning of backwarding
283 : */
284 : graph_const_reverse_iterator<LayerNode> getBackwardingBeginIter() const {
285 : return crbegin();
286 : }
287 :
288 : /**
289 : * @brief get end iterator for the backwarding
290 : * @retval const reverse iterator marking the end of backwarding
291 : */
292 : graph_const_reverse_iterator<LayerNode> getBackwardingEndIter() const {
293 : return crend();
294 : }
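/**
 * Usage sketch (illustrative only): walking the backward range, which is the
 * exact reverse of the topological order; the iterator's value type is
 * assumed to be a LayerNode pointer.
 * @code
 * for (auto iter = g.getBackwardingBeginIter();
 *      iter != g.getBackwardingEndIter(); ++iter) {
 *   auto node = *iter; // last forwarded node comes first
 * }
 * @endcode
 */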
295 :
296 : /**
297 : * @brief getter of output dimension of graph
298 : * @retval output tensor dim list
299 : */
300 : std::vector<TensorDim> getOutputDimension() const;
301 :
302 : /**
303 : * @brief getter of input dimension of graph
304 : * @retval input tensor dim list
305 : */
306 : std::vector<TensorDim> getInputDimension() const;
307 :
308 : /**
309 : * @brief Get the batch size of the current model
310 : *
311 : * @return unsigned int
312 : */
313 : unsigned int getBatchSize() const;
314 :
315 : /**
316 : * @brief Copy the graph
317 : * @param[in] from Graph Object to copy
318 : * @retval Graph Object copied
319 : */
320 : NetworkGraph &copy(NetworkGraph &from) {
321 : graph.copy(from.graph);
322 0 : return *this;
323 : }
324 :
325 : /**
326 : * @brief initialize network graph
327 : *
328 : * @param model_input_names model input connections; if an empty list is
329 : * given, all nodes that can be inputs are identified in sorted order
330 : * @param model_label_names model label names; if an empty list is given,
331 : * all nodes that can be labels are identified in sorted order
332 : * @return int ML_ERROR_NONE if successful
333 : */
334 : int initialize(ExecutionMode mode = ExecutionMode::TRAIN,
335 : const std::vector<Connection> &model_input_names = {},
336 : const std::vector<Connection> &model_label_names = {});
337 :
338 : /**
339 : * @brief reinitialize network graph
340 : *
341 : * @param model_input_names model input connections; if an empty list is
342 : * given, all nodes that can be inputs are identified in sorted order
343 : * @param model_label_names model label names; if an empty list is given,
344 : * all nodes that can be labels are identified in sorted order
345 : * @return int ML_ERROR_NONE if successful
346 : */
347 : int reinitialize(const std::vector<Connection> &model_input_names = {},
348 : const std::vector<Connection> &model_label_names = {});
349 :
350 : /**
351 : * @brief Create run layer context from the given init layer context
352 : *
353 : * @param lnode layer node to finalize and set run context
354 : * @param prev_inputs previous input information
355 : */
356 : std::vector<Var_Grad *>
357 : finalizeContext(const std::shared_ptr<LayerNode> &lnode,
358 : const std::vector<Var_Grad *> &prev_inputs);
359 :
360 : /**
361 : * @brief Recreate run layer context from the given init layer context
362 : *
363 : * @param lnode layer node to finalize and set run context
364 : * @param prev_inputs previous input information
365 : */
366 : std::vector<Var_Grad *>
367 : refinalizeContext(const std::shared_ptr<LayerNode> &lnode,
368 : const std::vector<Var_Grad *> &prev_inputs);
369 :
370 : /** Interface for manager */
371 :
372 : /**
373 : * @brief Allocate memory for all the managed tensors
374 : *
375 : * @param[in] exec_mode_ execution mode; derivatives/gradients are initialized only for training
376 : */
377 : void allocateTensors(ExecutionMode exec_mode_);
378 :
379 : /**
380 : * @brief Deallocate memory for all the managed tensors
381 : */
382 : void deallocateTensors(bool dealloc_weights = false) {
383 2484 : tensor_manager->deallocateTensors(dealloc_weights);
384 1775 : }
385 :
386 : /**
387 : * @brief Allocate memory for all the managed weights
388 : */
389 : void allocateWeights(bool init = true) {
390 : unsigned int max_exec_order =
391 617 : std::get<3>(backward_iter_end->getExecutionOrder());
392 :
393 617 : if (exec_mode == ExecutionMode::INFERENCE)
394 1 : max_exec_order = std::get<0>(forward_iter_end->getExecutionOrder());
395 617 : tensor_manager->allocateWeights(max_exec_order, init);
396 617 : }
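/**
 * Typical lifecycle sketch (illustrative only): compile, initialize,
 * allocate, run, tear down. The loss type is hypothetical and error handling
 * is omitted.
 * @code
 * g.compile("mse");
 * g.initialize(ExecutionMode::TRAIN);
 * g.allocateWeights();
 * g.allocateTensors(ExecutionMode::TRAIN);
 * // ... forwarding() / backwarding() iterations ...
 * g.deallocateTensors(true); // also deallocates weights
 * @endcode
 */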
397 :
398 : /**
399 : * @brief Deallocate memory for all the weights
400 : */
401 : void deallocateWeights() { tensor_manager->deallocateWeights(); }
402 :
403 : /**
404 : * @brief Enable the memory optimizations for the network
405 : *
406 : * @param val true to enable, else false
407 : */
408 : void setMemoryOptimizations(bool val) {
409 : tensor_manager->setOptimizations(val);
410 695 : optimize_memory = val;
411 : }
412 :
413 : /**
414 : * @brief Create optimizer variables for every weight
415 : *
416 : * @param cb callback function that returns a vector of dimensions
417 : * @param request_only_trainable true to request variables only for trainable weights
418 : */
419 : void requestOptimizerVariable(
420 : std::function<std::vector<TensorDim>(const TensorDim &)> cb,
421 : bool request_only_trainable = true);
422 :
423 : /**
424 : * @brief Feed inputs and labels to the graph
425 : *
426 : * @param inputs Input data
427 : * @param labels Label data
428 : */
429 : void setInputsLabels(const std::vector<Tensor> &inputs,
430 : const std::vector<Tensor> &labels);
431 :
432 : /**
433 : * @brief Feed inputs and labels to the graph
434 : *
435 : * @param inputs Input data
436 : * @param labels Label data
437 : */
438 : void setInputsLabels(sharedConstTensors &inputs, sharedConstTensors &labels);
439 :
440 : /**
441 : * @brief Get the Output Tensors list for the graph
442 : *
443 : * @return std::vector<Tensor> List of output tensors
444 : * @note this tensor list is analogous to the label list
445 : */
446 : std::vector<Tensor> getOutputTensors() const;
447 :
448 : /**
449 : * @brief return model tensor type
450 : *
451 : * @return {tensor format (NCHW or NHWC), weight dtype, activation dtype}
452 : */
453 4946 : std::array<std::string, 3> getTensorType() {
454 4946 : return {tensor_format, tensor_dtype[0], tensor_dtype[1]};
455 14838 : };
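/**
 * Usage sketch (illustrative only): reading back the tensor format and data
 * types (requires C++17 structured bindings).
 * @code
 * auto [fmt, weight_dtype, activation_dtype] = g.getTensorType();
 * // e.g., fmt == "NCHW", weight_dtype == "FP32", activation_dtype == "FP32"
 * @endcode
 */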
456 :
457 : /**
458 : * @brief Flush data to the device
459 : *
460 : */
461 : void flushCache();
462 :
463 : /**
464 : * @brief Flush data to the device, except for the given execution order
465 : *
466 : * @param order execution order to exclude
467 : */
468 : void flushCacheExcept(const unsigned int order);
469 :
470 : /**
471 : * @brief Load data of the given execution order to the device
472 : *
473 : * @param order execution order
474 : */
475 : void LoadTensors(const unsigned int order,
476 : unsigned int remainder_lookahead = 0);
477 :
478 : /**
479 : * @brief check whether the data of the given order is loaded
480 : *
481 : * @param order execution order
482 : */
483 : bool checkLoadComplete(const unsigned int order);
484 :
485 : /**
486 : * @brief deactivate the cache elements of the given execution order
487 : *
488 : */
489 : bool inActive(unsigned int order);
490 :
491 : /**
492 : * @brief check whether the data of the given order is unloaded
493 : *
494 : * @param order execution order
495 : */
496 : bool checkUnloadComplete(const unsigned int order);
497 :
498 : /**
499 : * @brief Unload data of the given execution order from the device
500 : *
501 : * @param order execution order
502 : */
503 : void UnloadTensors(const unsigned int order);
504 :
505 : #ifdef ENABLE_TEST
506 : /**
507 : * @brief Get layer node's tensor execution orders
508 : *
509 : * @param lnode layer node
510 : * @note this is for test purposes only
511 : */
512 : std::map<std::string, std::vector<unsigned int>>
513 : getLayerExecutionOrders(const std::shared_ptr<LayerNode> &lnode);
514 : #endif // ENABLE_TEST
515 :
516 : /**
517 : * @brief reset the loss scale
518 : * @param[in] scale new loss scale value
519 : */
520 : void resetLossScale(float scale);
521 :
522 : /**
523 : * @brief check if it is mixed precision training
524 : */
525 7511 : bool isMixedPrecision() { return (!istrequal(tensor_dtype[1], "FP32")); }
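/**
 * Usage sketch (illustrative only): the loss-scale retry loop implied by
 * backwarding()'s return value; the initial scale and halving policy are
 * assumptions, not part of this header. fwd_op, bwd_op, and lazy_op are as
 * in the backwarding() sketch above.
 * @code
 * float scale = 1024.0f;
 * g.resetLossScale(scale);
 * while (!g.backwarding(iteration, fwd_op, bwd_op, lazy_op)) {
 *   scale *= 0.5f;           // gradients hit NaN/Inf at this scale
 *   g.resetLossScale(scale); // retry with a smaller loss scale
 * }
 * @endcode
 */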
526 :
527 : /**
528 : * @brief set FSU weight path
529 : *
530 : * @param path FSU weight file path
531 : */
532 0 : void setFsuWeightPath(const std::string &path) {
533 0 : tensor_manager->setFsuWeightPath(path);
534 0 : }
535 :
536 : /**
537 : * @brief set weight file offset for FSU loading
538 : *
539 : * @param offsets weight file offset
540 : */
541 0 : void setWeightOffset(std::vector<std::pair<size_t, size_t>> &offsets) {
542 0 : tensor_manager->setWeightOffset(offsets);
543 0 : }
544 :
545 : private:
546 : std::map<std::string, std::string> sub_in_out; /**< map to identify the
547 : input and output layer names of a subgraph */
548 : std::shared_ptr<Manager> tensor_manager; /**< tensors manager */
549 :
550 : GraphCore graph; /** core graph object */
551 : bool compiled; /**< if the model graph is compiled */
552 : unsigned int batch_size; /**< current batch_size */
553 : unsigned int graph_exec_end; /**< Inclusive, last execution order of the
554 : given graph */
555 : LayerNode *backward_iter_end; /**< inclusive end node of the valid backward
556 : execution when initialized; nodes after
557 : this node do not require backwarding, thus
558 : making it a no-op */
559 : LayerNode *forward_iter_end; /**< inclusive end node of the forward execution
560 : when initialized */
561 :
562 : /// @note *_list and *_dims must be kept in sync at all times; consider
563 : /// combining them into a structure
564 : std::vector<std::string> label_list; /**< identifier for the model labels */
565 : std::vector<std::string> input_list; /**< identifier for the model inputs */
566 : std::vector<std::string> output_list; /**< identifier for the model outputs */
567 : std::vector<TensorDim> label_dims_; /**< graph label dimensions */
568 : std::vector<TensorDim> input_dims_; /**< graph input dimensions */
569 :
570 : bool optimize_memory; /**< optimize memory */
571 : ExecutionMode exec_mode; /**< execution mode with which the graph is
572 : currently (or was last) set */
573 :
574 : std::string tensor_format; /**< Model Tensor Format: NCHW or NHWC */
575 :
576 : std::vector<std::string> tensor_dtype; /**< Model Tensor Type: FP32, FP16 */
577 :
578 : std::unordered_map<std::string, int>
579 : profile_keys; /**< profile keys based on the layer type */
580 : std::vector<Weight *>
581 : lazy_weights; /**< weights with delayed grad update, e.g., gradient
582 : clipping, loss scaling */
583 : bool is_clip_grad; /**< whether gradients are clipped by norm */
584 : float loss_scale; /**< loss scale factor for mixed precision training */
585 : unsigned int nan_count; /**< number of NaN gradient occurrences observed */
586 :
587 : /**
588 : * @brief topological sort
589 : * @param[in] ith index of LayerNode
590 : * @param[in] visited temporary list of visited flags
591 : * @param[in] Stack stack of nodes to visit
592 : */
593 : void topologicalSortUtil(unsigned int ith, std::vector<bool> &visited,
594 : std::stack<std::shared_ptr<LayerNode>> &Stack);
595 :
596 : /**
597 : * @brief check if graph is ready to compile.
598 : * @retval #ML_ERROR_NONE graph is ready to compile
599 : * @retval #ML_ERROR_INVALID_PARAMETER not ready to compile.
600 : */
601 : int isCompilable();
602 :
603 : /**
604 : * @brief check if the compiled graph is of correct form.
605 : * @retval #ML_ERROR_NONE graph is compiled correctly
606 : * @retval #ML_ERROR_INVALID_PARAMETER did not compile correctly
607 : */
608 : int checkCompiledGraph();
609 :
610 : /**
611 : * @brief mark nodes required for backwarding.
612 : */
613 : void markNodesForBackwarding();
614 :
615 : /**
616 : * @brief adding loss layer at last position
617 : * @param[in] loss_type loss type
618 : * @retval #ML_ERROR_NONE Successful.
619 : * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
620 : */
621 : int addLossLayer(const std::string &loss_type);
622 :
623 : /**
624 : * @brief set output connections for all the layers
625 : */
626 : void setOutputConnections();
627 :
628 : /**
629 : * @brief Ensure that layer has a name.
630 : * @param[in] layer Layer whose name is to be ensured to be valid
631 : * @param[in] prefix Prefix to be attached to the layer name
632 : * @param[in] postfix Postfix to be attached to the layer name
633 : * @param[in] force_rename If true, the layer is forcefully renamed
634 : * @details Ensures that the layer has a unique and a valid name. A valid
635 : * name pre-assigned to the layer can be changed if force_rename is enabled.
636 : */
637 : void ensureName(std::shared_ptr<Layer> layer, const std::string &prefix = "",
638 : const std::string &postfix = "", bool force_rename = false);
639 :
640 : /**
641 : * @brief Create new LayerNode and add into Graph
642 : * @param[in] layer shared_ptr of Layer
643 : */
644 : void addLayerNode(std::unique_ptr<Layer> layer);
645 :
646 : /**
647 : * @brief finalize already added loss layers
648 : *
649 : * @details This involves verifying that the requirements of the added loss
650 : * layers are met and merging loss layers with activation layers if needed.
651 : */
652 : void finalizeLossLayer();
653 :
654 : /**
655 : * @brief Set the order of execution for all the nodes in the graph
656 : *
657 : * @details This sets the order of execution using the order from the
658 : * topological sort. The order of forwarding matches the topological sort. The
659 : * order for backwarding is in the exact reverse order. The calcDerivative()
660 : * is expected to be called right after calcGradient().
661 : */
662 : void setExecutionOrder();
663 :
664 : /**
665 : * @brief Set external data to the given tensors with name
666 : *
667 : * @param data External data
668 : * @param names names of the tensors to set the data to
669 : */
670 : void setExternalTensors(const std::vector<Tensor> &data,
671 : const std::vector<std::string> names);
672 :
673 : /**
674 : * @brief Optimize the graph memory utilization for in-place operations
675 : */
676 : void inPlaceOptimize();
677 :
678 : /**
679 : * @brief Check if the given node can execute in-place
680 : *
681 : * @param lnode node to check for in-place execution
682 : *
683 : * @return the mode of inplace for the layer
684 : */
685 : InPlaceType canExecuteInPlace(const std::shared_ptr<LayerNode> &lnode);
686 :
687 : /**
688 : * @brief compute the optimized backward end. This function calculates the
689 : * valid end of the graph's backward pass; if memory optimization is disabled,
690 : * it returns the beginning node of the graph.
691 : *
692 : * @return end node of the backward iteration
693 : */
694 : LayerNode *computeBackwardEnd();
695 : };
696 : } // namespace nntrainer
697 :
698 : #endif /* __cplusplus */
699 : #endif /* __NETWORK_GRAPH_H__ */