Skip to content

Commit

Permalink
[neuralnet] Verify optimizer during initialization according to execu…
Browse files Browse the repository at this point in the history
…tion mode.

- Moves the validation of the optimizer configuration from train()
  to initialize()
- Adds checks that verify the optimizer settings and updates several
  unit tests that run under ExecutionMode::TRAIN without a prior
  optimizer setup.

Signed-off-by: Eunju Yang <ej.yang@samsung.com>
  • Loading branch information
EunjuYang authored and jijoongmoon committed Jan 3, 2025
1 parent 35624c5 commit 252b2df
Show file tree
Hide file tree
Showing 6 changed files with 30 additions and 6 deletions.
19 changes: 15 additions & 4 deletions nntrainer/models/neuralnet.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -262,10 +262,15 @@ int NeuralNetwork::initialize(ExecutionMode mode) {
model_graph.setBatchSize(
std::get<props::TrainingBatchSize>(model_flex_props));

// initialize optimizer and related variables
/// @todo: initialize should take a mode and check if mode is train but
/// optimizer is not given, make it as a hard error
if (opt) {
// If the execution mode is `train`, the optimizer and its related variables
// are initialized. Throws an error if no optimizer has been set for training;
// otherwise, the optimizer is finalized and initialized below.
if (exec_mode == ExecutionMode::TRAIN) {

if (!opt) {
ml_loge("Optimizer should be set before initialization for training.");
return ML_ERROR_INVALID_PARAMETER;
}
/** TODO: update request of optimizer to be of same format as
* Layer::requestTensor */
opt->finalize();
Expand Down Expand Up @@ -1643,6 +1648,12 @@ void NeuralNetwork::exports(const ml::train::ExportMethods &method,
}
case ml::train::ExportMethods::METHOD_FLATBUFFER: {

/**
* @todo The current FLATBUFFER exporter only supports TRAIN execution mode.
* It should be updated to support both train and inference mode.
* It would be more natural to support inference by default since tflite is
* typically used solely for inference
*/
model_graph.deallocateTensors();
model_graph.allocateTensors(ExecutionMode::TRAIN);
break;
Expand Down
5 changes: 4 additions & 1 deletion test/ccapi/unittest_ccapi.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -415,6 +415,9 @@ TEST(nntrainer_ccapi, train_batch_size_update_after) {

/**
* @brief Neural Network Model Training
* @note Compilation without any argument sets default execution mode as train.
* The train mode requires optimizer to be set. Thus, both initialize and
* train throw errors.
*/
TEST(nntrainer_ccapi, train_with_config_02_n) {
std::unique_ptr<ml::train::Model> model;
Expand All @@ -427,7 +430,7 @@ TEST(nntrainer_ccapi, train_with_config_02_n) {

EXPECT_EQ(model->loadFromConfig(s.getIniName()), ML_ERROR_NONE);
EXPECT_EQ(model->compile(), ML_ERROR_NONE);
EXPECT_EQ(model->initialize(), ML_ERROR_NONE);
EXPECT_EQ(model->initialize(), ML_ERROR_INVALID_PARAMETER);
EXPECT_EQ(model->train(), ML_ERROR_INVALID_PARAMETER);
}

Expand Down
2 changes: 1 addition & 1 deletion test/tizen_capi/unittest_tizen_capi.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -834,7 +834,7 @@ TEST(nntrainer_capi_nnmodel, getLayer_03_p) {
ml_train_model_h model;
ml_train_layer_h get_layer;

ScopedIni s("getLayer_03_p", {model_base, inputlayer});
ScopedIni s("getLayer_03_p", {model_base, inputlayer, optimizer});

status = ml_train_model_construct_with_conf(s.getIniName().c_str(), &model);
EXPECT_EQ(status, ML_ERROR_NONE);
Expand Down
6 changes: 6 additions & 0 deletions test/unittest/compiler/unittest_tflite_export.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,8 @@ TEST(nntrainerInterpreterTflite, simple_fc) {
nn_model->addLayer(createLayer("fully_connected",
{withKey("name", "fc1"), withKey("unit", 1)}));

auto optimizer = ml::train::createOptimizer("sgd", {"learning_rate=0.001"});
EXPECT_EQ(nn_model->setOptimizer(std::move(optimizer)), ML_ERROR_NONE);
EXPECT_EQ(nn_model->compile(), ML_ERROR_NONE);
EXPECT_EQ(nn_model->initialize(), ML_ERROR_NONE);

Expand Down Expand Up @@ -278,6 +280,8 @@ TEST(nntrainerInterpreterTflite, part_of_resnet_0) {
nn_model->addLayer(node);
}

auto optimizer = ml::train::createOptimizer("sgd", {"learning_rate=0.001"});
EXPECT_EQ(nn_model->setOptimizer(std::move(optimizer)), ML_ERROR_NONE);
EXPECT_EQ(nn_model->compile(), ML_ERROR_NONE);
EXPECT_EQ(nn_model->initialize(), ML_ERROR_NONE);

Expand Down Expand Up @@ -359,6 +363,8 @@ TEST(nntrainerInterpreterTflite, MNIST_FULL_TEST) {
nn_model->addLayer(createLayer(
"fully_connected", {withKey("name", "fc0"), withKey("unit", 10)}));

auto optimizer = ml::train::createOptimizer("sgd", {"learning_rate=0.001"});
EXPECT_EQ(nn_model->setOptimizer(std::move(optimizer)), ML_ERROR_NONE);
EXPECT_EQ(nn_model->compile(), ML_ERROR_NONE);
EXPECT_EQ(nn_model->initialize(), ML_ERROR_NONE);

Expand Down
2 changes: 2 additions & 0 deletions test/unittest/unittest_nntrainer_exe_order.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,8 @@ genModel(const std::vector<LayerRepresentation> &layers) {
model.addLayer(layer);
}

auto optimizer = ml::train::createOptimizer("sgd", {"learning_rate=0.001"});
model.setOptimizer(std::move(optimizer));
model.compile();
model.initialize();

Expand Down
2 changes: 2 additions & 0 deletions test/unittest/unittest_nntrainer_graph.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -331,6 +331,8 @@ TEST(nntrainerGraphUnitTest, compile_twice) {
EXPECT_NO_THROW(nn_model->addLayer(node));
}

auto optimizer = ml::train::createOptimizer("sgd", {"learning_rate=0.001"});
EXPECT_EQ(nn_model->setOptimizer(std::move(optimizer)), ML_ERROR_NONE);
EXPECT_EQ(nn_model->compile(), ML_ERROR_NONE);
EXPECT_EQ(nn_model->initialize(), ML_ERROR_NONE);
try {
Expand Down

0 comments on commit 252b2df

Please sign in to comment.