[property] Add model_property loss_scale validation logic
Add logic that validates loss_scale at the setProperty stage

**Self evaluation:**
1. Build test:	 [X]Passed [ ]Failed [ ]Skipped
2. Run test:	 [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Donghak PARK <[email protected]>
DonghakPark committed Nov 28, 2024
1 parent 1ec6461 commit b346fe1
Showing 3 changed files with 46 additions and 4 deletions.
5 changes: 5 additions & 0 deletions nntrainer/models/model_common_properties.cpp
@@ -44,4 +44,9 @@ ModelTensorDataType::ModelTensorDataType(ModelTensorDataTypeInfo::Enum value) {
}
LossScale::LossScale(float value) { set(value); }

bool LossScale::isValid(const float &value) const {
  if (value == 0.0f) {
    ml_loge("Loss scale cannot be 0");
    return false;
  }
  return true;
}

} // namespace nntrainer::props
8 changes: 8 additions & 0 deletions nntrainer/models/model_common_properties.h
@@ -238,6 +238,14 @@ class LossScale : public Property<float> {
LossScale(float value = 1.0f);
static constexpr const char *key = "loss_scale"; /**< unique key to access */
using prop_tag = float_prop_tag; /**< property type */

/**
 * @brief check whether the given loss scale value is valid
 *
 * @param value value to check
 * @return bool true if valid (i.e., non-zero)
 */
bool isValid(const float &value) const override;
};

} // namespace nntrainer::props
@@ -23,6 +23,11 @@
#include <optimizer.h>
using namespace nntrainer;

/**
* Supported Layer list
* [InputLayer, BatchNormalizationLayer, FullyConnectedLayer, Conv1DLayer]
*/

TEST(mixed_precision, input_only_model_test) {

std::unique_ptr<ml::train::Model> nn =
@@ -44,6 +49,32 @@ TEST(mixed_precision, input_only_model_test) {
EXPECT_EQ(nn->reinitialize(), ML_ERROR_NONE);
}

TEST(mixed_precision, conv1d_model_test) {

std::unique_ptr<ml::train::Model> nn =
ml::train::createModel(ml::train::ModelType::NEURAL_NET, {"loss=mse"});
nn->setProperty(
{"batch_size=1", "model_tensor_type=FP16-FP16", "loss_scale=65536"});

auto graph = makeGraph({
{"input", {"name=in", "input_shape=3:1:3"}},
{"conv1d", {"name=conv1d_0", "filters=1", "kernel_size=3"}},
{"conv1d", {"name=conv1d_1", "filters=1", "kernel_size=1"}},
{"batch_normalization", {"name=bn_0", "epsilon=0.00001", "momentum=0.9"}},
{"batch_normalization", {"name=bn_1", "epsilon=0.00001", "momentum=0.9"}},
});

for (auto &node : graph) {
nn->addLayer(node);
}

nn->setOptimizer(ml::train::createOptimizer("adam", {"learning_rate = 0.1"}));

EXPECT_EQ(nn->compile(), ML_ERROR_NONE);
EXPECT_EQ(nn->initialize(), ML_ERROR_NONE);
EXPECT_EQ(nn->reinitialize(), ML_ERROR_NONE);
}

TEST(mixed_precision, loss_scale_test) {
std::unique_ptr<ml::train::Model> nn =
ml::train::createModel(ml::train::ModelType::NEURAL_NET, {"loss=mse"});
@@ -52,10 +83,8 @@
  EXPECT_THROW(nn->setProperty({"batch_size=1", "model_tensor_type=FP16-FP16",
                                "loss_scale=0"}),
               std::invalid_argument);

-  EXPECT_NO_THROW(
-    nn->setProperty(
-      {"batch_size=1", "model_tensor_type=FP16-FP16", "loss_scale=65536"}),
-    std::invalid_argument);
+  EXPECT_NO_THROW(nn->setProperty(
+    {"batch_size=1", "model_tensor_type=FP16-FP16", "loss_scale=65536"}));
}

TEST(mixed_precision, model_tensor_type_test) {
