Skip to content

Commit 4d568bf

Browse files
committed
[WIP][coverity] fix coverity issues
WIP. Signed-off-by: Seungbaek Hong <sb92.hong@samsung.com>
1 parent 6be8e84 commit 4d568bf

File tree

5 files changed

+14
-4
lines changed

5 files changed

+14
-4
lines changed

nntrainer/layers/gru.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,8 @@ void GRULayer::finalize(InitLayerContext &context) {
9494
const TensorDim &input_dim = context.getInputDimensions()[0];
9595
const unsigned int batch_size = input_dim.batch();
9696
const unsigned int max_timestep = input_dim.height();
97+
NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
98+
<< "max timestep must be greater than 0 in gru layer.";
9799
const unsigned int feature_size = input_dim.width();
98100

99101
// if return_sequences == False :

nntrainer/layers/lstm.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -454,6 +454,8 @@ void LSTMLayer::finalize(InitLayerContext &context) {
454454
if (!std::get<props::MaxTimestep>(lstm_props).empty())
455455
max_timestep =
456456
std::max(max_timestep, std::get<props::MaxTimestep>(lstm_props).get());
457+
NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
458+
<< "max timestep must be greater than 0 in lstm layer.";
457459
std::get<props::MaxTimestep>(lstm_props).set(max_timestep);
458460
const unsigned int feature_size = input_dim.width();
459461

nntrainer/layers/rnn.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,8 @@ void RNNLayer::finalize(InitLayerContext &context) {
7777
const TensorDim &input_dim = context.getInputDimensions()[SINGLE_INOUT_IDX];
7878
const unsigned int batch_size = input_dim.batch();
7979
const unsigned int max_timestep = input_dim.height();
80+
NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
81+
<< "max timestep must be greater than 0 in rnn layer.";
8082
const unsigned int feature_size = input_dim.width();
8183

8284
// output_dim = [ batch, 1, (return_sequences ? time_iteration : 1), unit ]

nntrainer/utils/util_func.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,8 @@ char *getRealpath(const char *name, char *resolved) {
210210
#ifdef _WIN32
211211
return _fullpath(resolved, name, MAX_PATH_LENGTH);
212212
#else
213-
return realpath(name, resolved);
213+
resolved = realpath(name, nullptr);
214+
return resolved;
214215
#endif
215216
}
216217

test/unittest/compiler/unittest_tflite_export.cpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include <node_exporter.h>
2424
#include <optimizer.h>
2525
#include <realizer.h>
26+
#include <stdlib.h>
2627

2728
#include <nntrainer_test_util.h>
2829

@@ -43,6 +44,8 @@ std::vector<float> ans;
4344
std::vector<float *> in_f;
4445
std::vector<float *> l_f;
4546

47+
unsigned int seed = 0;
48+
4649
/**
4750
* @brief make "key=value" from key and value
4851
*
@@ -155,7 +158,7 @@ TEST(nntrainerInterpreterTflite, simple_fc) {
155158
float *nntr_input = new float[data_size];
156159

157160
for (unsigned int i = 0; i < data_size; i++) {
158-
auto rand_float = static_cast<float>(std::rand() / (RAND_MAX + 1.0));
161+
auto rand_float = static_cast<float>(rand_r(&seed) / (RAND_MAX + 1.0));
159162
input_data.push_back(rand_float);
160163
nntr_input[i] = rand_float;
161164
}
@@ -284,7 +287,7 @@ TEST(nntrainerInterpreterTflite, part_of_resnet_0) {
284287
float *nntr_input = new float[data_size];
285288

286289
for (unsigned int i = 0; i < data_size; i++) {
287-
auto rand_float = static_cast<float>(std::rand() / (RAND_MAX + 1.0));
290+
auto rand_float = static_cast<float>(rand_r(&seed) / (RAND_MAX + 1.0));
288291
input_data.push_back(rand_float);
289292
nntr_input[i] = rand_float;
290293
}
@@ -365,7 +368,7 @@ TEST(nntrainerInterpreterTflite, MNIST_FULL_TEST) {
365368
float nntr_input[28 * 28];
366369

367370
for (unsigned int i = 0; i < data_size; i++) {
368-
auto rand_float = static_cast<float>(std::rand() / (RAND_MAX + 1.0));
371+
auto rand_float = static_cast<float>(rand_r(&seed) / (RAND_MAX + 1.0));
369372
input_data.push_back(rand_float);
370373
nntr_input[i] = rand_float;
371374
}

0 commit comments

Comments
 (0)