diff --git a/test/include/nntrainer_test_util.h b/test/include/nntrainer_test_util.h index 64e56fdc43..7845b63647 100644 --- a/test/include/nntrainer_test_util.h +++ b/test/include/nntrainer_test_util.h @@ -333,5 +333,34 @@ float mse(Ta *A, Tb *B, uint32_t size) { return mse; } +/** + * @brief A helper struct for performing static_cast operations on types. + * + * This struct provides a templated function that can be used to perform a + * static_cast operation between two types. It is intended to be used with the + * std::transform() function from the STL. + * + * @tparam T The target type to which the value will be converted. + */ +template <typename T> // T models Any +struct static_cast_func { + /** + * @brief Performs a static_cast operation on a given value. + * + * This function takes a constant reference to a value of type T1, where T1 is + * a type that is statically convertible to T. It performs a static_cast + * operation on the value and returns the result as a value of type T. + * + * @tparam T1 The source type of the value being converted. + * @param[in] x The input value to convert. + * @return result of the static_cast operation as a value of type + * T. 
+ */ + template <typename T1> // T1 models type statically convertible to T + T operator()(const T1 &x) const { + return static_cast<T>(x); + } +}; + #endif /* __cplusplus */ #endif /* __NNTRAINER_TEST_UTIL_H__ */ diff --git a/test/unittest/unittest_nntrainer_tensor_v2.cpp b/test/unittest/unittest_nntrainer_tensor_v2.cpp index b8b89d1255..aa3bbc7539 100644 --- a/test/unittest/unittest_nntrainer_tensor_v2.cpp +++ b/test/unittest/unittest_nntrainer_tensor_v2.cpp @@ -321,6 +321,433 @@ TEST(nntrainer_Tensor, initialize_08_p) { EXPECT_EQ(golden, t); } +TEST(nntrainer_Tensor, multiply_i_01_p) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 original; + original.copy(input); + + status = input.multiply_i(2.0); + EXPECT_EQ(status, ML_ERROR_NONE); + + float *data = original.getData(); + ASSERT_NE(nullptr, data); + float *indata = input.getData(); + ASSERT_NE(nullptr, indata); + + for (int i = 0; i < batch * channel * width * height; ++i) { + EXPECT_FLOAT_EQ(data[i] + data[i], indata[i]); + } +} + +TEST(nntrainer_Tensor, multiply_i_02_p) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 original; + original.copy(input); + + status = input.multiply_i(input); + EXPECT_EQ(status, ML_ERROR_NONE); + + float *data = original.getData(); + ASSERT_NE(nullptr, data); + float *indata = input.getData(); + ASSERT_NE(nullptr, indata); + + for (int i = 0; i < batch * channel * width * height; ++i) { + EXPECT_FLOAT_EQ(data[i] * data[i], indata[i]); + } +} + +TEST(nntrainer_Tensor, multiply_i_03_n) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + 
nntrainer::TensorV2 input(batch, channel, height, width); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 target2(batch, channel, height - 2, width - 1); + status = input.multiply_i(target2); + + EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); +} + +TEST(nntrainer_Tensor, multiply_i_broadcast_01_p) { + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(1, 2, 4, 5); + float answer_data[] = { + 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, + 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484, 529, + 576, 625, 676, 729, 784, 841, 900, 961, 1024, 1089, 1156, 1225, + 1296, 1369, 1444, 1521, 0, 41, 84, 129, 176, 225, 276, 329, + 384, 441, 500, 561, 624, 689, 756, 825, 896, 969, 1044, 1121, + 1200, 1281, 1364, 1449, 1536, 1625, 1716, 1809, 1904, 2001, 2100, 2201, + 2304, 2409, 2516, 2625, 2736, 2849, 2964, 3081, 0, 81, 164, 249, + 336, 425, 516, 609, 704, 801, 900, 1001, 1104, 1209, 1316, 1425, + 1536, 1649, 1764, 1881, 2000, 2121, 2244, 2369, 2496, 2625, 2756, 2889, + 3024, 3161, 3300, 3441, 3584, 3729, 3876, 4025, 4176, 4329, 4484, 4641}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(3, 1, 4, 5); + float answer_data[] = { + 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, + 144, 169, 196, 225, 256, 289, 324, 361, 0, 21, 44, 69, + 96, 125, 156, 189, 224, 261, 300, 341, 384, 429, 476, 525, + 576, 629, 684, 741, 800, 861, 924, 989, 1056, 1125, 1196, 1269, + 1344, 1421, 1500, 1581, 1664, 1749, 1836, 1925, 2016, 2109, 2204, 2301, + 1200, 1281, 1364, 1449, 1536, 1625, 1716, 1809, 1904, 2001, 2100, 2201, + 2304, 2409, 2516, 2625, 2736, 2849, 2964, 3081, 3200, 3321, 3444, 3569, + 3696, 3825, 3956, 4089, 4224, 4361, 4500, 4641, 4784, 
4929, 5076, 5225, + 5376, 5529, 5684, 5841, 4000, 4141, 4284, 4429, 4576, 4725, 4876, 5029, + 5184, 5341, 5500, 5661, 5824, 5989, 6156, 6325, 6496, 6669, 6844, 7021}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(3, 2, 4, 1); + float answer_data[] = { + 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 20, 22, + 24, 26, 28, 45, 48, 51, 54, 57, 80, 84, 88, 92, + 96, 125, 130, 135, 140, 145, 180, 186, 192, 198, 204, 245, + 252, 259, 266, 273, 320, 328, 336, 344, 352, 405, 414, 423, + 432, 441, 500, 510, 520, 530, 540, 605, 616, 627, 638, 649, + 720, 732, 744, 756, 768, 845, 858, 871, 884, 897, 980, 994, + 1008, 1022, 1036, 1125, 1140, 1155, 1170, 1185, 1280, 1296, 1312, 1328, + 1344, 1445, 1462, 1479, 1496, 1513, 1620, 1638, 1656, 1674, 1692, 1805, + 1824, 1843, 1862, 1881, 2000, 2020, 2040, 2060, 2080, 2205, 2226, 2247, + 2268, 2289, 2420, 2442, 2464, 2486, 2508, 2645, 2668, 2691, 2714, 2737}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(3, 1, 1, 5); + float answer_data[] = { + 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, + 24, 39, 56, 0, 16, 34, 54, 76, 0, 21, 44, 69, + 96, 0, 26, 54, 84, 116, 0, 31, 64, 99, 136, 0, + 36, 74, 114, 156, 200, 246, 294, 344, 396, 225, 276, 329, + 384, 441, 250, 306, 364, 424, 486, 275, 336, 399, 464, 531, + 300, 366, 434, 504, 576, 325, 396, 469, 544, 621, 350, 426, + 504, 584, 666, 375, 456, 539, 624, 711, 800, 891, 984, 1079, + 1176, 850, 946, 1044, 1144, 1246, 900, 1001, 1104, 1209, 1316, 950, + 1056, 1164, 1274, 1386, 1000, 1111, 1224, 1339, 1456, 1050, 1166, 1284, + 1404, 1526, 1100, 1221, 1344, 
1469, 1596, 1150, 1276, 1404, 1534, 1666}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(1, 2, 1, 5); + float answer_data[] = { + 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, 24, 39, + 56, 0, 16, 34, 54, 76, 100, 126, 154, 184, 216, 125, 156, 189, + 224, 261, 150, 186, 224, 264, 306, 175, 216, 259, 304, 351, 0, 41, + 84, 129, 176, 0, 46, 94, 144, 196, 0, 51, 104, 159, 216, 0, + 56, 114, 174, 236, 300, 366, 434, 504, 576, 325, 396, 469, 544, 621, + 350, 426, 504, 584, 666, 375, 456, 539, 624, 711, 0, 81, 164, 249, + 336, 0, 86, 174, 264, 356, 0, 91, 184, 279, 376, 0, 96, 194, + 294, 396, 500, 606, 714, 824, 936, 525, 636, 749, 864, 981, 550, 666, + 784, 904, 1026, 575, 696, 819, 944, 1071}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(3, 1, 4, 1); + float answer_data[] = { + 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 20, 22, + 24, 26, 28, 45, 48, 51, 54, 57, 0, 0, 0, 0, + 0, 25, 26, 27, 28, 29, 60, 62, 64, 66, 68, 105, + 108, 111, 114, 117, 160, 164, 168, 172, 176, 225, 230, 235, + 240, 245, 300, 306, 312, 318, 324, 385, 392, 399, 406, 413, + 240, 244, 248, 252, 256, 325, 330, 335, 340, 345, 420, 426, + 432, 438, 444, 525, 532, 539, 546, 553, 640, 648, 656, 664, + 672, 765, 774, 783, 792, 801, 900, 910, 920, 930, 940, 1045, + 1056, 1067, 1078, 1089, 800, 808, 816, 824, 832, 945, 954, 963, + 972, 981, 1100, 1110, 1120, 1130, 1140, 1265, 1276, 1287, 1298, 1309}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + 
nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(1, 1, 1, 5); + float answer_data[] = { + 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, 24, 39, 56, + 0, 16, 34, 54, 76, 0, 21, 44, 69, 96, 0, 26, 54, 84, 116, + 0, 31, 64, 99, 136, 0, 36, 74, 114, 156, 0, 41, 84, 129, 176, + 0, 46, 94, 144, 196, 0, 51, 104, 159, 216, 0, 56, 114, 174, 236, + 0, 61, 124, 189, 256, 0, 66, 134, 204, 276, 0, 71, 144, 219, 296, + 0, 76, 154, 234, 316, 0, 81, 164, 249, 336, 0, 86, 174, 264, 356, + 0, 91, 184, 279, 376, 0, 96, 194, 294, 396, 0, 101, 204, 309, 416, + 0, 106, 214, 324, 436, 0, 111, 224, 339, 456, 0, 116, 234, 354, 476}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(1, 2, 1, 1); + float answer_data[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5); + nntrainer::TensorV2 m = rangedV2(3, 1, 1, 1); + float answer_data[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 
62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 160, 162, 164, 166, + 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, + 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 234, 236, 238}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 5, 1, 4); + nntrainer::TensorV2 t = rangedV2(3, 5, 1, 4); + nntrainer::TensorV2 m = rangedV2(3, 1, 1, 4); + float answer_data[] = {0, 1, 4, 9, 0, 5, 12, 21, 0, 9, + 20, 33, 0, 13, 28, 45, 0, 17, 36, 57, + 80, 105, 132, 161, 96, 125, 156, 189, 112, 145, + 180, 217, 128, 165, 204, 245, 144, 185, 228, 273, + 320, 369, 420, 473, 352, 405, 460, 517, 384, 441, + 500, 561, 416, 477, 540, 605, 448, 513, 580, 649}; + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } +} + +TEST(nntrainer_Tensor, multiply_i_broadcast_not_supported_01_n) { + nntrainer::TensorV2 target(3, 1, 3, 1); + nntrainer::TensorV2 target2(3, 1, 3, 3); + + EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER); +} + +TEST(nntrainer_Tensor, multiply_i_broadcast_not_broadcastable_02_n) { + nntrainer::TensorV2 target(3, 2, 4, 5); + nntrainer::TensorV2 target2(3, 2, 3, 1); + + EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER); +} + +TEST(nntrainer_Tensor, multiply_01_p) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 result = input.multiply(0.0); + if (result.getValue(0, 0, 1, 1) != 0.0) + status = ML_ERROR_RESULT_OUT_OF_RANGE; + EXPECT_EQ(status, ML_ERROR_NONE); +} + +TEST(nntrainer_Tensor, multiply_02_p) { + int status = 
ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + + nntrainer::TensorV2 result = input.multiply(input); + + float *data = result.getData(); + ASSERT_NE(nullptr, data); + float *indata = input.getData(); + ASSERT_NE(nullptr, indata); + + for (int i = 0; i < batch * height * width; ++i) { + if (data[i] != indata[i] * indata[i]) { + status = ML_ERROR_RESULT_OUT_OF_RANGE; + break; + } + } + + EXPECT_EQ(status, ML_ERROR_NONE); +} + +TEST(nntrainer_Tensor, multiply_03_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + + nntrainer::TensorV2 test(batch - 1, height - 1, width - 1); + + EXPECT_THROW({ input.multiply(test); }, std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_04_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(batch, channel, height, 2 * width); + nntrainer::TensorV2 shared_input = + input.getSharedDataTensor(dim, 0, false, ""); + nntrainer::TensorV2 test(dim); + + EXPECT_THROW(shared_input.multiply(test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_05_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(dim); + nntrainer::TensorV2 test(batch, channel, height, 2 * width); + nntrainer::TensorV2 shared_test = test.getSharedDataTensor(dim, 0, false, ""); + + EXPECT_THROW(input.multiply(shared_test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_06_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, 
channel, height, width); + + nntrainer::TensorV2 input(dim, false); + nntrainer::TensorV2 test(dim); + GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1); + + EXPECT_THROW(input.multiply(test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_07_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(dim); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + nntrainer::TensorV2 test(dim, false); + + EXPECT_THROW(input.multiply(test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_08_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(dim); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + nntrainer::TensorV2 test(dim); + GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 2); + nntrainer::TensorV2 output(dim, false); + + EXPECT_THROW(input.multiply(test, output), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_float_01_p) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + + nntrainer::TensorV2 expected(batch, channel, height, width); + GEN_TEST_INPUT(expected, (i * (batch * height) + j * (width) + k + 1) * 2); + + nntrainer::TensorV2 result = input.multiply(2.0); + + EXPECT_EQ(result, expected); +} + int main(int argc, char **argv) { int result = -1; diff --git a/test/unittest/unittest_nntrainer_tensor_v2_fp16.cpp b/test/unittest/unittest_nntrainer_tensor_v2_fp16.cpp index d2bca517a3..adb848dd47 100644 --- a/test/unittest/unittest_nntrainer_tensor_v2_fp16.cpp +++ b/test/unittest/unittest_nntrainer_tensor_v2_fp16.cpp @@ -394,6 +394,519 @@ TEST(nntrainer_Tensor, initialize_08_p) { 
EXPECT_EQ(golden, t); } +TEST(nntrainer_Tensor, multiply_i_01_fp16_p) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 original; + original.copy(input); + + status = input.multiply_i(2.0); + EXPECT_EQ(status, ML_ERROR_NONE); + + _FP16 *data = original.getData<_FP16>(); + ASSERT_NE(nullptr, data); + _FP16 *indata = input.getData<_FP16>(); + ASSERT_NE(nullptr, indata); + + for (int i = 0; i < batch * height * width * channel; ++i) { + EXPECT_FLOAT_EQ(data[i] + data[i], indata[i]); + } +} + +TEST(nntrainer_Tensor, multiply_i_02_fp16_p) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 original; + original.copy(input); + + status = input.multiply_i(input); + EXPECT_EQ(status, ML_ERROR_NONE); + + _FP16 *data = original.getData<_FP16>(); + ASSERT_NE(nullptr, data); + _FP16 *indata = input.getData<_FP16>(); + ASSERT_NE(nullptr, indata); + + for (int i = 0; i < batch * height * width * channel; ++i) { + EXPECT_FLOAT_EQ(data[i] * data[i], indata[i]); + } +} + +TEST(nntrainer_Tensor, multiply_i_03_fp16_n) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 target2(batch, channel, height - 2, width - 1, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + status = input.multiply_i(target2); + 
+ EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); +} + +TEST(nntrainer_Tensor, multiply_i_broadcast_01_fp16_p) { + unsigned int N = 120; + _FP16 *answer_data = new _FP16[N]; + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(1, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + + float float_data[] = { + 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, + 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484, 529, + 576, 625, 676, 729, 784, 841, 900, 961, 1024, 1089, 1156, 1225, + 1296, 1369, 1444, 1521, 0, 41, 84, 129, 176, 225, 276, 329, + 384, 441, 500, 561, 624, 689, 756, 825, 896, 969, 1044, 1121, + 1200, 1281, 1364, 1449, 1536, 1625, 1716, 1809, 1904, 2001, 2100, 2201, + 2304, 2409, 2516, 2625, 2736, 2849, 2964, 3081, 0, 81, 164, 249, + 336, 425, 516, 609, 704, 801, 900, 1001, 1104, 1209, 1316, 1425, + 1536, 1649, 1764, 1881, 2000, 2121, 2244, 2369, 2496, 2625, 2756, 2889, + 3024, 3161, 3300, 3441, 3584, 3729, 3876, 4025, 4176, 4329, 4484, 4641}; + + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(3, 1, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, + 144, 169, 196, 225, 256, 289, 324, 361, 0, 21, 44, 69, + 96, 125, 156, 189, 224, 261, 300, 341, 384, 429, 476, 525, + 576, 629, 684, 741, 800, 861, 924, 989, 1056, 1125, 1196, 1269, + 1344, 1421, 1500, 
1581, 1664, 1749, 1836, 1925, 2016, 2109, 2204, 2301, + 1200, 1281, 1364, 1449, 1536, 1625, 1716, 1809, 1904, 2001, 2100, 2201, + 2304, 2409, 2516, 2625, 2736, 2849, 2964, 3081, 3200, 3321, 3444, 3569, + 3696, 3825, 3956, 4089, 4224, 4361, 4500, 4641, 4784, 4929, 5076, 5225, + 5376, 5529, 5684, 5841, 4000, 4141, 4284, 4429, 4576, 4725, 4876, 5029, + 5184, 5341, 5500, 5661, 5824, 5989, 6156, 6325, 6496, 6669, 6844, 7021}; + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(3, 2, 4, 1, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 20, 22, + 24, 26, 28, 45, 48, 51, 54, 57, 80, 84, 88, 92, + 96, 125, 130, 135, 140, 145, 180, 186, 192, 198, 204, 245, + 252, 259, 266, 273, 320, 328, 336, 344, 352, 405, 414, 423, + 432, 441, 500, 510, 520, 530, 540, 605, 616, 627, 638, 649, + 720, 732, 744, 756, 768, 845, 858, 871, 884, 897, 980, 994, + 1008, 1022, 1036, 1125, 1140, 1155, 1170, 1185, 1280, 1296, 1312, 1328, + 1344, 1445, 1462, 1479, 1496, 1513, 1620, 1638, 1656, 1674, 1692, 1805, + 1824, 1843, 1862, 1881, 2000, 2020, 2040, 2060, 2080, 2205, 2226, 2247, + 2268, 2289, 2420, 2442, 2464, 2486, 2508, 2645, 2668, 2691, 2714, 2737}; + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = 
rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(3, 1, 1, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, + 24, 39, 56, 0, 16, 34, 54, 76, 0, 21, 44, 69, + 96, 0, 26, 54, 84, 116, 0, 31, 64, 99, 136, 0, + 36, 74, 114, 156, 200, 246, 294, 344, 396, 225, 276, 329, + 384, 441, 250, 306, 364, 424, 486, 275, 336, 399, 464, 531, + 300, 366, 434, 504, 576, 325, 396, 469, 544, 621, 350, 426, + 504, 584, 666, 375, 456, 539, 624, 711, 800, 891, 984, 1079, + 1176, 850, 946, 1044, 1144, 1246, 900, 1001, 1104, 1209, 1316, 950, + 1056, 1164, 1274, 1386, 1000, 1111, 1224, 1339, 1456, 1050, 1166, 1284, + 1404, 1526, 1100, 1221, 1344, 1469, 1596, 1150, 1276, 1404, 1534, 1666}; + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(1, 2, 1, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, 24, 39, + 56, 0, 16, 34, 54, 76, 100, 126, 154, 184, 216, 125, 156, 189, + 224, 261, 150, 186, 224, 264, 306, 175, 216, 259, 304, 351, 0, 41, + 84, 129, 176, 0, 46, 94, 144, 196, 0, 51, 104, 159, 216, 0, + 56, 114, 174, 236, 300, 366, 434, 504, 576, 325, 396, 469, 544, 621, + 350, 426, 504, 584, 666, 375, 456, 539, 624, 711, 0, 81, 164, 249, + 336, 0, 86, 174, 264, 356, 0, 91, 184, 279, 376, 0, 96, 194, + 294, 396, 500, 606, 714, 824, 936, 525, 636, 749, 864, 981, 550, 666, + 784, 904, 1026, 575, 696, 819, 944, 1071}; + std::transform(float_data, float_data + N, 
answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(3, 1, 4, 1, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 20, 22, + 24, 26, 28, 45, 48, 51, 54, 57, 0, 0, 0, 0, + 0, 25, 26, 27, 28, 29, 60, 62, 64, 66, 68, 105, + 108, 111, 114, 117, 160, 164, 168, 172, 176, 225, 230, 235, + 240, 245, 300, 306, 312, 318, 324, 385, 392, 399, 406, 413, + 240, 244, 248, 252, 256, 325, 330, 335, 340, 345, 420, 426, + 432, 438, 444, 525, 532, 539, 546, 553, 640, 648, 656, 664, + 672, 765, 774, 783, 792, 801, 900, 910, 920, 930, 940, 1045, + 1056, 1067, 1078, 1089, 800, 808, 816, 824, 832, 945, 954, 963, + 972, 981, 1100, 1110, 1120, 1130, 1140, 1265, 1276, 1287, 1298, 1309}; + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(1, 1, 1, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, 24, 39, 56, + 0, 16, 34, 54, 76, 0, 21, 44, 69, 96, 0, 26, 54, 84, 116, + 0, 31, 64, 99, 136, 0, 36, 74, 114, 156, 0, 41, 84, 129, 176, + 0, 46, 94, 144, 196, 0, 51, 104, 159, 216, 0, 56, 114, 174, 236, + 0, 61, 124, 189, 256, 0, 66, 134, 204, 276, 0, 71, 144, 219, 
296, + 0, 76, 154, 234, 316, 0, 81, 164, 249, 336, 0, 86, 174, 264, 356, + 0, 91, 184, 279, 376, 0, 96, 194, 294, 396, 0, 101, 204, 309, 416, + 0, 106, 214, 324, 436, 0, 111, 224, 339, 456, 0, 116, 234, 354, 476}; + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(1, 2, 1, 1, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119}; + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(3, 1, 1, 1, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 
53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 160, 162, 164, 166, + 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, + 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, + 224, 226, 228, 230, 232, 234, 236, 238}; + std::transform(float_data, float_data + N, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + { + nntrainer::TensorDim ref_dim(3, 5, 1, 4, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 t = rangedV2(3, 5, 1, 4, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 m = rangedV2(3, 1, 1, 4, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + float float_data[] = {0, 1, 4, 9, 0, 5, 12, 21, 0, 9, + 20, 33, 0, 13, 28, 45, 0, 17, 36, 57, + 80, 105, 132, 161, 96, 125, 156, 189, 112, 145, + 180, 217, 128, 165, 204, 245, 144, 185, 228, 273, + 320, 369, 420, 473, 352, 405, 460, 517, 384, 441, + 500, 561, 416, 477, 540, 605, 448, 513, 580, 649}; + std::transform(float_data, float_data + 60, answer_data, + static_cast_func<_FP16>()); + nntrainer::TensorV2 answer(ref_dim, answer_data); + int status = t.multiply_i(m); + EXPECT_EQ(status, ML_ERROR_NONE); + EXPECT_EQ(t, answer); + } + delete[] answer_data; +} + +TEST(nntrainer_Tensor, multiply_i_broadcast_not_supported_01_n) { + + nntrainer::TensorV2 target(3, 1, 3, 1, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 target2(3, 1, 3, 3, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + + EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER); +} + +TEST(nntrainer_Tensor, multiply_i_broadcast_not_broadcastable_02_n) { + nntrainer::TensorV2 target(3, 2, 4, 5, nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 target2(3, 2, 3, 1, 
nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + + EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER); +} + +TEST(nntrainer_Tensor, multiply_01_p) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k); + + nntrainer::TensorV2 result = input.multiply(0.0); + if (result.getValue<_FP16>(0, 0, 1, 1) != 0.0) + status = ML_ERROR_RESULT_OUT_OF_RANGE; + EXPECT_EQ(status, ML_ERROR_NONE); +} + +TEST(nntrainer_Tensor, multiply_02_p) { + int status = ML_ERROR_NONE; + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + + nntrainer::TensorV2 result = input.multiply(input); + + _FP16 *data = result.getData<_FP16>(); + ASSERT_NE(nullptr, data); + _FP16 *indata = input.getData<_FP16>(); + ASSERT_NE(nullptr, indata); + + for (int i = 0; i < batch * height * width; ++i) { + if (data[i] != indata[i] * indata[i]) { + status = ML_ERROR_RESULT_OUT_OF_RANGE; + break; + } + } + + EXPECT_EQ(status, ML_ERROR_NONE); +} + +TEST(nntrainer_Tensor, multiply_03_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + + nntrainer::TensorV2 test(batch - 1, height - 1, width - 1, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + + EXPECT_THROW({ input.multiply(test); }, std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_04_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + 
nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(batch, channel, height, 2 * width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 shared_input = + input.getSharedDataTensor(dim, 0, false, ""); + nntrainer::TensorV2 test(dim); + + EXPECT_THROW(shared_input.multiply(test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_05_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(dim); + nntrainer::TensorV2 test(batch, channel, height, 2 * width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + nntrainer::TensorV2 shared_test = test.getSharedDataTensor(dim, 0, false, ""); + + EXPECT_THROW(input.multiply(shared_test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_06_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(dim, false); + nntrainer::TensorV2 test(dim); + GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1); + + EXPECT_THROW(input.multiply(test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_07_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(dim); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + nntrainer::TensorV2 test(dim, false); + + EXPECT_THROW(input.multiply(test), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_08_n) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorDim dim(batch, channel, height, width); + + nntrainer::TensorV2 input(dim); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + nntrainer::TensorV2 test(dim); + GEN_TEST_INPUT(test, i * (batch * height) + j * 
(width) + k + 2); + nntrainer::TensorV2 output(dim, false); + + EXPECT_THROW(input.multiply(test, output), std::invalid_argument); +} + +TEST(nntrainer_Tensor, multiply_float_01_p) { + int batch = 3; + int channel = 1; + int height = 3; + int width = 10; + + nntrainer::TensorV2 input(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1); + + nntrainer::TensorV2 expected(batch, channel, height, width, + nntrainer::Tformat::NCHW, + nntrainer::Tdatatype::FP16); + GEN_TEST_INPUT(expected, (i * (batch * height) + j * (width) + k + 1) * 2); + + nntrainer::TensorV2 result = input.multiply(2.0); + + EXPECT_EQ(result, expected); +} + int main(int argc, char **argv) { int result = -1;