From ab45ba706928da0f0f3c91377b4903e61c94ec93 Mon Sep 17 00:00:00 2001 From: nnsuite Date: Sun, 22 Dec 2024 20:24:43 +0000 Subject: [PATCH] 2024-12-22 : Update test coverage result. Signed-off-by: nnsuite --- .../c/src/index-sort-f.html | 38 +-- .../c/src/index-sort-l.html | 24 +- .../c/src/index.html | 24 +- .../c/src/ml-api-common.c.func-c.html | 30 +-- .../c/src/ml-api-common.c.func.html | 30 +-- .../c/src/ml-api-common.c.gcov.html | 244 ++++++++--------- .../ml-api-inference-internal.c.func-c.html | 14 +- .../src/ml-api-inference-internal.c.func.html | 14 +- .../src/ml-api-inference-internal.c.gcov.html | 68 ++--- .../ml-api-inference-pipeline.c.func-c.html | 20 +- .../src/ml-api-inference-pipeline.c.func.html | 20 +- .../src/ml-api-inference-pipeline.c.gcov.html | 250 +++++++++--------- .../src/ml-api-inference-single.c.func-c.html | 32 +-- .../c/src/ml-api-inference-single.c.func.html | 18 +- .../c/src/ml-api-inference-single.c.gcov.html | 94 +++---- .../ml-api-service-agent-client.c.func-c.html | 6 +- .../ml-api-service-agent-client.c.func.html | 6 +- .../ml-api-service-agent-client.c.gcov.html | 6 +- .../ml-api-service-extension.c.func-c.html | 6 +- .../src/ml-api-service-extension.c.func.html | 6 +- .../src/ml-api-service-extension.c.gcov.html | 6 +- .../ml-api-service-offloading.c.func-c.html | 8 +- .../src/ml-api-service-offloading.c.func.html | 8 +- .../src/ml-api-service-offloading.c.gcov.html | 30 +-- .../ml-api-service-query-client.c.func-c.html | 6 +- .../ml-api-service-query-client.c.func.html | 6 +- .../ml-api-service-query-client.c.gcov.html | 6 +- ...-service-training-offloading.c.func-c.html | 6 +- ...pi-service-training-offloading.c.func.html | 6 +- ...pi-service-training-offloading.c.gcov.html | 6 +- .../c/src/ml-api-service.c.func-c.html | 6 +- .../c/src/ml-api-service.c.func.html | 6 +- .../c/src/ml-api-service.c.gcov.html | 6 +- testresult/ml-api/cmd_line | 2 +- testresult/ml-api/coverage_badge.svg | 2 +- testresult/ml-api/index-sort-f.html | 24 +- testresult/ml-api/index-sort-l.html | 24 +- testresult/ml-api/index.html | 24 +- 38 files changed, 566 insertions(+), 566 deletions(-) diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html index b3e45a97e..e9b326b5d 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 83.3 % + 83.1 % 5160 - 4296 + 4289 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 96.9 % + 96.6 % 294 - 285 + 284 @@ -79,6 +79,18 @@ Total Hit + + ml-api-inference-single.c + +
75.1%75.1%
+ + 75.1 % + 855 + 642 + 92.3 % + 39 + 36 + ml-api-inference-pipeline.c @@ -91,18 +103,6 @@ 84 79 - - ml-api-inference-single.c - -
75.9%75.9%
- - 75.9 % - 855 - 649 - 94.9 % - 39 - 37 - ml-api-common.c diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html index 813f03333..ca4194ce9 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 83.3 % + 83.1 % 5160 - 4296 + 4289 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 96.9 % + 96.6 % 294 - 285 + 284 @@ -82,14 +82,14 @@ ml-api-inference-single.c -
75.9%75.9%
+
75.1%75.1%
- 75.9 % + 75.1 % 855 - 649 - 94.9 % + 642 + 92.3 % 39 - 37 + 36 ml-api-service-offloading.c diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html index 531c9aa81..fa58fefd0 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 83.3 % + 83.1 % 5160 - 4296 + 4289 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 96.9 % + 96.6 % 294 - 285 + 284 @@ -118,14 +118,14 @@ ml-api-inference-single.c -
75.9%75.9%
+
75.1%75.1%
- 75.9 % + 75.1 % 855 - 649 - 94.9 % + 642 + 92.3 % 39 - 37 + 36 ml-api-service-agent-client.c diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html index e5b201d9b..5e5f226c9 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 88.1 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 96.4 % @@ -317,7 +317,7 @@ ml_tensors_data_destroy - 481 + 485 @@ -380,77 +380,77 @@ ml_tensors_data_create - 6924 + 6492 ml_tensors_info_validate - 7018 + 6586 _ml_tensors_info_create_from - 7194 + 6762 ml_tensors_info_clone - 7207 + 6775 _ml_tensors_data_destroy_internal - 13920 + 13057 _ml_tensors_data_create_no_alloc - 13949 + 13082 ml_tensors_info_destroy - 21188 + 19893 ml_tensors_info_create - 21206 + 19907 _ml_tensors_info_free - 21227 + 19932 _ml_tensors_info_initialize - 21240 + 19941 _ml_tensors_info_create_internal - 21241 + 19942 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html index 1e80ab4d9..29b1ba56f 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 88.1 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 96.4 % @@ -163,14 +163,14 @@ _ml_tensors_data_create_no_alloc - 13949 + 13082 _ml_tensors_data_destroy_internal - 13920 + 13057 @@ -184,28 +184,28 @@ _ml_tensors_info_create_from - 7194 + 6762 _ml_tensors_info_create_internal - 21241 + 19942 _ml_tensors_info_free - 21227 + 19932 _ml_tensors_info_initialize - 21240 + 19941 @@ -324,14 +324,14 @@ ml_tensors_data_create - 6924 + 6492 ml_tensors_data_destroy - 481 + 485 @@ -359,14 +359,14 @@ ml_tensors_info_clone - 7207 + 6775 ml_tensors_info_create - 21206 + 19907 @@ -380,7 +380,7 @@ ml_tensors_info_destroy - 21188 + 19893 @@ -450,7 +450,7 @@ ml_tensors_info_validate - 7018 + 6586 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html index 8aedd29e5..921d2ff96 100644 --- 
a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 88.1 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 96.4 % @@ -184,61 +184,61 @@ 122 : * @brief Internal function to create tensors-info handle. 123 : */ 124 : static int - 125 21241 : _ml_tensors_info_create_internal (ml_tensors_info_h * info, bool extended) + 125 19942 : _ml_tensors_info_create_internal (ml_tensors_info_h * info, bool extended) 126 : { 127 : ml_tensors_info_s *tensors_info; 128 : - 129 21241 : check_feature_state (ML_FEATURE); + 129 19942 : check_feature_state (ML_FEATURE); 130 : - 131 21241 : if (!info) + 131 19942 : if (!info) 132 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 133 : "The parameter, info, is NULL. Provide a valid pointer."); 134 : - 135 21239 : *info = tensors_info = g_new0 (ml_tensors_info_s, 1); - 136 21239 : if (tensors_info == NULL) + 135 19940 : *info = tensors_info = g_new0 (ml_tensors_info_s, 1); + 136 19940 : if (tensors_info == NULL) 137 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY, 138 : "Failed to allocate the tensors info handle. Out of memory?"); 139 : - 140 21239 : g_mutex_init (&tensors_info->lock); - 141 21239 : tensors_info->is_extended = extended; + 140 19940 : g_mutex_init (&tensors_info->lock); + 141 19940 : tensors_info->is_extended = extended; 142 : 143 : /* init tensors info struct */ - 144 21239 : return _ml_tensors_info_initialize (tensors_info); + 144 19940 : return _ml_tensors_info_initialize (tensors_info); 145 : } 146 : 147 : /** 148 : * @brief Creates new tensors-info handle and copies tensors information. 149 : */ 150 : int - 151 7194 : _ml_tensors_info_create_from (const ml_tensors_info_h in, + 151 6762 : _ml_tensors_info_create_from (const ml_tensors_info_h in, 152 : ml_tensors_info_h * out) 153 : { 154 : ml_tensors_info_s *_info; 155 : int status; 156 : - 157 7194 : if (!in || !out) + 157 6762 : if (!in || !out) 158 0 : return ML_ERROR_INVALID_PARAMETER; 159 : - 160 7194 : _info = (ml_tensors_info_s *) in; + 160 6762 : _info = (ml_tensors_info_s *) in; 161 : - 162 7194 : if (_info->is_extended) + 162 6762 : if (_info->is_extended) 163 7 : status = ml_tensors_info_create_extended (out); 164 : else - 165 7187 : status = ml_tensors_info_create (out); + 165 6755 : status = ml_tensors_info_create (out); 166 : - 167 7194 : if (status == ML_ERROR_NONE) - 168 7194 : status = ml_tensors_info_clone (*out, in); + 167 6762 : if (status == ML_ERROR_NONE) + 168 6762 : status = ml_tensors_info_clone (*out, in); 169 : - 170 7194 : return status; + 170 6762 : return status; 171 : } 172 : 173 : /** 174 : * @brief Allocates a tensors information handle with default value. 
175 : */ 176 : int - 177 21206 : ml_tensors_info_create (ml_tensors_info_h * info) + 177 19907 : ml_tensors_info_create (ml_tensors_info_h * info) 178 : { - 179 21206 : return _ml_tensors_info_create_internal (info, false); + 179 19907 : return _ml_tensors_info_create_internal (info, false); 180 : } 181 : 182 : /** @@ -254,53 +254,53 @@ 192 : * @brief Frees the given handle of a tensors information. 193 : */ 194 : int - 195 21188 : ml_tensors_info_destroy (ml_tensors_info_h info) + 195 19893 : ml_tensors_info_destroy (ml_tensors_info_h info) 196 : { 197 : ml_tensors_info_s *tensors_info; 198 : - 199 21188 : check_feature_state (ML_FEATURE); + 199 19893 : check_feature_state (ML_FEATURE); 200 : - 201 21188 : tensors_info = (ml_tensors_info_s *) info; + 201 19893 : tensors_info = (ml_tensors_info_s *) info; 202 : - 203 21188 : if (!tensors_info) + 203 19893 : if (!tensors_info) 204 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 205 : "The parameter, info, is NULL. Provide a valid pointer."); 206 : - 207 21187 : G_LOCK_UNLESS_NOLOCK (*tensors_info); - 208 21187 : _ml_tensors_info_free (tensors_info); - 209 21187 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info); + 207 19892 : G_LOCK_UNLESS_NOLOCK (*tensors_info); + 208 19892 : _ml_tensors_info_free (tensors_info); + 209 19892 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info); 210 : - 211 21187 : g_mutex_clear (&tensors_info->lock); - 212 21187 : g_free (tensors_info); + 211 19892 : g_mutex_clear (&tensors_info->lock); + 212 19892 : g_free (tensors_info); 213 : - 214 21187 : return ML_ERROR_NONE; + 214 19892 : return ML_ERROR_NONE; 215 : } 216 : 217 : /** 218 : * @brief Validates the given tensors info is valid. 219 : */ 220 : int - 221 7018 : ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid) + 221 6586 : ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid) 222 : { 223 : ml_tensors_info_s *tensors_info; 224 : - 225 7018 : check_feature_state (ML_FEATURE); + 225 6586 : check_feature_state (ML_FEATURE); 226 : - 227 7018 : if (!valid) + 227 6586 : if (!valid) 228 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 229 : "The data-return parameter, valid, is NULL. It should be a pointer pre-allocated by the caller."); 230 : - 231 7017 : tensors_info = (ml_tensors_info_s *) info; + 231 6585 : tensors_info = (ml_tensors_info_s *) info; 232 : - 233 7017 : if (!tensors_info) + 233 6585 : if (!tensors_info) 234 3 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 235 : "The input parameter, tensors_info, is NULL. It should be a valid ml_tensors_info_h, which is usually created by ml_tensors_info_create()."); 236 : - 237 7014 : G_LOCK_UNLESS_NOLOCK (*tensors_info); - 238 7014 : *valid = gst_tensors_info_validate (&tensors_info->info); - 239 7014 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info); + 237 6582 : G_LOCK_UNLESS_NOLOCK (*tensors_info); + 238 6582 : *valid = gst_tensors_info_validate (&tensors_info->info); + 239 6582 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info); 240 : - 241 7014 : return ML_ERROR_NONE; + 241 6582 : return ML_ERROR_NONE; 242 : } 243 : 244 : /** @@ -711,15 +711,15 @@ 649 : * @brief Initializes the tensors information with default value. 650 : */ 651 : int - 652 21240 : _ml_tensors_info_initialize (ml_tensors_info_s * info) + 652 19941 : _ml_tensors_info_initialize (ml_tensors_info_s * info) 653 : { - 654 21240 : if (!info) + 654 19941 : if (!info) 655 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 656 : "The parameter, info, is NULL. 
Provide a valid pointer."); 657 : - 658 21239 : gst_tensors_info_init (&info->info); + 658 19940 : gst_tensors_info_init (&info->info); 659 : - 660 21239 : return ML_ERROR_NONE; + 660 19940 : return ML_ERROR_NONE; 661 : } 662 : 663 : /** @@ -727,12 +727,12 @@ 665 : * @note This does not touch the lock. The caller should lock. 666 : */ 667 : void - 668 21227 : _ml_tensors_info_free (ml_tensors_info_s * info) + 668 19932 : _ml_tensors_info_free (ml_tensors_info_s * info) 669 : { - 670 21227 : if (!info) + 670 19932 : if (!info) 671 0 : return; 672 : - 673 21227 : gst_tensors_info_free (&info->info); + 673 19932 : gst_tensors_info_free (&info->info); 674 : } 675 : 676 : /** @@ -742,21 +742,21 @@ 680 : * @return @c 0 on success. Otherwise a negative error value. 681 : */ 682 : int - 683 13920 : _ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data) + 683 13057 : _ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data) 684 : { - 685 13920 : int status = ML_ERROR_NONE; + 685 13057 : int status = ML_ERROR_NONE; 686 : ml_tensors_data_s *_data; 687 : guint i; 688 : - 689 13920 : if (data == NULL) + 689 13057 : if (data == NULL) 690 19 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 691 : "The parameter, data, is NULL. It should be a valid ml_tensors_data_h handle, which is usually created by ml_tensors_data_create ()."); 692 : - 693 13901 : _data = (ml_tensors_data_s *) data; - 694 13901 : G_LOCK_UNLESS_NOLOCK (*_data); + 693 13038 : _data = (ml_tensors_data_s *) data; + 694 13038 : G_LOCK_UNLESS_NOLOCK (*_data); 695 : - 696 13901 : if (free_data) { - 697 462 : if (_data->destroy) { + 696 13038 : if (free_data) { + 697 466 : if (_data->destroy) { 698 0 : status = _data->destroy (_data, _data->user_data); 699 0 : if (status != ML_ERROR_NONE) { 700 0 : G_UNLOCK_UNLESS_NOLOCK (*_data); @@ -765,22 +765,22 @@ 703 : status); 704 : } 705 : } else { - 706 118734 : for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) { - 707 118272 : if (_data->tensors[i].data) { - 708 686 : g_free (_data->tensors[i].data); - 709 686 : _data->tensors[i].data = NULL; + 706 119762 : for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) { + 707 119296 : if (_data->tensors[i].data) { + 708 690 : g_free (_data->tensors[i].data); + 709 690 : _data->tensors[i].data = NULL; 710 : } 711 : } 712 : } 713 : } 714 : - 715 13901 : if (_data->info) - 716 13901 : ml_tensors_info_destroy (_data->info); + 715 13038 : if (_data->info) + 716 13038 : ml_tensors_info_destroy (_data->info); 717 : - 718 13901 : G_UNLOCK_UNLESS_NOLOCK (*_data); - 719 13901 : g_mutex_clear (&_data->lock); - 720 13901 : g_free (_data); - 721 13901 : return status; + 718 13038 : G_UNLOCK_UNLESS_NOLOCK (*_data); + 719 13038 : g_mutex_clear (&_data->lock); + 720 13038 : g_free (_data); + 721 13038 : return status; 722 : } 723 : 724 : /** @@ -788,15 +788,15 @@ 726 : * @note This does not touch the lock 727 : */ 728 : int - 729 481 : ml_tensors_data_destroy (ml_tensors_data_h data) + 729 485 : ml_tensors_data_destroy (ml_tensors_data_h data) 730 : { 731 : int ret; - 732 481 : check_feature_state (ML_FEATURE); - 733 481 : ret = _ml_tensors_data_destroy_internal (data, TRUE); - 734 481 : if (ret != ML_ERROR_NONE) + 732 485 : check_feature_state (ML_FEATURE); + 733 485 : ret = _ml_tensors_data_destroy_internal (data, TRUE); + 734 485 : if (ret != ML_ERROR_NONE) 735 19 : _ml_error_report_return_continue (ret, 736 : "Call to _ml_tensors_data_destroy_internal failed with %d", ret); - 737 462 : return ret; + 737 466 : return ret; 738 : } 739 : 740 
: /** @@ -804,56 +804,56 @@ 742 : * @note Memory for tensor data buffers is not allocated. 743 : */ 744 : int - 745 13949 : _ml_tensors_data_create_no_alloc (const ml_tensors_info_h info, + 745 13082 : _ml_tensors_data_create_no_alloc (const ml_tensors_info_h info, 746 : ml_tensors_data_h * data) 747 : { 748 : ml_tensors_data_s *_data; 749 : ml_tensors_info_s *_info; 750 : guint i; - 751 13949 : int status = ML_ERROR_NONE; + 751 13082 : int status = ML_ERROR_NONE; 752 : - 753 13949 : check_feature_state (ML_FEATURE); + 753 13082 : check_feature_state (ML_FEATURE); 754 : - 755 13949 : if (data == NULL) + 755 13082 : if (data == NULL) 756 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 757 : "The parameter, data, is NULL. It should be a valid ml_tensors_info_h handle that may hold a space for ml_tensors_info_h. E.g., ml_tensors_data_h data; _ml_tensors_data_create_no_alloc (info, &data);."); 758 : 759 : /* init null */ - 760 13948 : *data = NULL; + 760 13081 : *data = NULL; 761 : - 762 13948 : _data = g_new0 (ml_tensors_data_s, 1); - 763 13948 : if (!_data) + 762 13081 : _data = g_new0 (ml_tensors_data_s, 1); + 763 13081 : if (!_data) 764 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY, 765 : "Failed to allocate memory for tensors data. Probably the system is out of memory."); 766 : - 767 13948 : g_mutex_init (&_data->lock); + 767 13081 : g_mutex_init (&_data->lock); 768 : - 769 13948 : _info = (ml_tensors_info_s *) info; - 770 13948 : if (_info != NULL) { - 771 7185 : status = _ml_tensors_info_create_from (info, &_data->info); - 772 7185 : if (status != ML_ERROR_NONE) { + 769 13081 : _info = (ml_tensors_info_s *) info; + 770 13081 : if (_info != NULL) { + 771 6753 : status = _ml_tensors_info_create_from (info, &_data->info); + 772 6753 : if (status != ML_ERROR_NONE) { 773 0 : _ml_error_report_continue 774 : ("Failed to create internal information handle for tensors data."); 775 0 : goto error; 776 : } 777 : - 778 7185 : G_LOCK_UNLESS_NOLOCK (*_info); - 779 7185 : _data->num_tensors = _info->info.num_tensors; - 780 14810 : for (i = 0; i < _data->num_tensors; i++) { - 781 7625 : _data->tensors[i].size = gst_tensors_info_get_size (&_info->info, i); - 782 7625 : _data->tensors[i].data = NULL; + 778 6753 : G_LOCK_UNLESS_NOLOCK (*_info); + 779 6753 : _data->num_tensors = _info->info.num_tensors; + 780 13946 : for (i = 0; i < _data->num_tensors; i++) { + 781 7193 : _data->tensors[i].size = gst_tensors_info_get_size (&_info->info, i); + 782 7193 : _data->tensors[i].data = NULL; 783 : } - 784 7185 : G_UNLOCK_UNLESS_NOLOCK (*_info); + 784 6753 : G_UNLOCK_UNLESS_NOLOCK (*_info); 785 : } 786 : - 787 6763 : error: - 788 13948 : if (status == ML_ERROR_NONE) { - 789 13948 : *data = _data; + 787 6328 : error: + 788 13081 : if (status == ML_ERROR_NONE) { + 789 13081 : *data = _data; 790 : } else { 791 0 : _ml_tensors_data_destroy_internal (_data, FALSE); 792 : } 793 : - 794 13948 : return status; + 794 13081 : return status; 795 : } 796 : 797 : /** @@ -973,48 +973,48 @@ 911 : * @brief Allocates a tensor data frame with the given tensors info. 
(more info in nnstreamer.h) 912 : */ 913 : int - 914 6924 : ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h * data) + 914 6492 : ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h * data) 915 : { - 916 6924 : gint status = ML_ERROR_STREAMS_PIPE; - 917 6924 : ml_tensors_data_s *_data = NULL; + 916 6492 : gint status = ML_ERROR_STREAMS_PIPE; + 917 6492 : ml_tensors_data_s *_data = NULL; 918 : guint i; 919 : bool valid; 920 : - 921 13848 : check_feature_state (ML_FEATURE); + 921 12984 : check_feature_state (ML_FEATURE); 922 : - 923 6924 : if (info == NULL) + 923 6492 : if (info == NULL) 924 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 925 : "The parameter, info, is NULL. It should be a valid pointer of ml_tensors_info_h, which is usually created by ml_tensors_info_create()."); - 926 6923 : if (data == NULL) + 926 6491 : if (data == NULL) 927 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 928 : "The parameter, data, is NULL. It should be a valid space to hold a ml_tensors_data_h handle. E.g., ml_tensors_data_h data; ml_tensors_data_create (info, &data);."); 929 : - 930 6922 : status = ml_tensors_info_validate (info, &valid); - 931 6922 : if (status != ML_ERROR_NONE) + 930 6490 : status = ml_tensors_info_validate (info, &valid); + 931 6490 : if (status != ML_ERROR_NONE) 932 0 : _ml_error_report_return_continue (status, 933 : "ml_tensors_info_validate() has reported that the parameter, info, is not NULL, but its contents are not valid. The user must provide a valid tensor information with it."); - 934 6922 : if (!valid) + 934 6490 : if (!valid) 935 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 936 : "The parameter, info, is not NULL, but its contents are not valid. The user must provide a valid tensor information with it. Probably, there is an entry that is not allocated or dimension/type information not available. The given info should have valid number of tensors, entries of every tensor along with its type and dimension info."); 937 : 938 : status = - 939 6921 : _ml_tensors_data_create_no_alloc (info, (ml_tensors_data_h *) & _data); + 939 6489 : _ml_tensors_data_create_no_alloc (info, (ml_tensors_data_h *) & _data); 940 : - 941 6921 : if (status != ML_ERROR_NONE) { + 941 6489 : if (status != ML_ERROR_NONE) { 942 0 : _ml_error_report_return_continue (status, 943 : "Failed to allocate tensor data based on the given info with the call to _ml_tensors_data_create_no_alloc (): %d. Check if it's out-of-memory.", 944 : status); 945 : } 946 : - 947 14064 : for (i = 0; i < _data->num_tensors; i++) { - 948 7143 : _data->tensors[i].data = g_malloc0 (_data->tensors[i].size); - 949 7143 : if (_data->tensors[i].data == NULL) { + 947 13200 : for (i = 0; i < _data->num_tensors; i++) { + 948 6711 : _data->tensors[i].data = g_malloc0 (_data->tensors[i].size); + 949 6711 : if (_data->tensors[i].data == NULL) { 950 0 : goto failed_oom; 951 : } 952 : } 953 : - 954 6921 : *data = _data; - 955 6921 : return ML_ERROR_NONE; + 954 6489 : *data = _data; + 955 6489 : return ML_ERROR_NONE; 956 : 957 0 : failed_oom: 958 0 : _ml_tensors_data_destroy_internal (_data, TRUE); @@ -1115,29 +1115,29 @@ 1053 : * @brief Copies tensor meta info. 
1054 : */ 1055 : int - 1056 7207 : ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src) + 1056 6775 : ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src) 1057 : { 1058 : ml_tensors_info_s *dest_info, *src_info; - 1059 7207 : int status = ML_ERROR_NONE; + 1059 6775 : int status = ML_ERROR_NONE; 1060 : - 1061 7207 : check_feature_state (ML_FEATURE); + 1061 6775 : check_feature_state (ML_FEATURE); 1062 : - 1063 7207 : dest_info = (ml_tensors_info_s *) dest; - 1064 7207 : src_info = (ml_tensors_info_s *) src; + 1063 6775 : dest_info = (ml_tensors_info_s *) dest; + 1064 6775 : src_info = (ml_tensors_info_s *) src; 1065 : - 1066 7207 : if (!dest_info) + 1066 6775 : if (!dest_info) 1067 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 1068 : "The parameter, dest, is NULL. It should be an allocated handle (ml_tensors_info_h), usually allocated by ml_tensors_info_create ()."); - 1069 7206 : if (!src_info) + 1069 6774 : if (!src_info) 1070 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 1071 : "The parameter, src, is NULL. It should be a handle (ml_tensors_info_h) with valid data."); 1072 : - 1073 7205 : G_LOCK_UNLESS_NOLOCK (*dest_info); - 1074 7205 : G_LOCK_UNLESS_NOLOCK (*src_info); + 1073 6773 : G_LOCK_UNLESS_NOLOCK (*dest_info); + 1074 6773 : G_LOCK_UNLESS_NOLOCK (*src_info); 1075 : - 1076 7205 : if (gst_tensors_info_validate (&src_info->info)) { - 1077 7205 : dest_info->is_extended = src_info->is_extended; - 1078 7205 : gst_tensors_info_copy (&dest_info->info, &src_info->info); + 1076 6773 : if (gst_tensors_info_validate (&src_info->info)) { + 1077 6773 : dest_info->is_extended = src_info->is_extended; + 1078 6773 : gst_tensors_info_copy (&dest_info->info, &src_info->info); 1079 : } else { 1080 0 : _ml_error_report 1081 : ("The parameter, src, is a ml_tensors_info_h handle without valid data. 
Every tensor-info of tensors-info should have a valid type and dimension information and the number of tensors should be between 1 and %d.", @@ -1145,10 +1145,10 @@ 1083 0 : status = ML_ERROR_INVALID_PARAMETER; 1084 : } 1085 : - 1086 7205 : G_UNLOCK_UNLESS_NOLOCK (*src_info); - 1087 7205 : G_UNLOCK_UNLESS_NOLOCK (*dest_info); + 1086 6773 : G_UNLOCK_UNLESS_NOLOCK (*src_info); + 1087 6773 : G_UNLOCK_UNLESS_NOLOCK (*dest_info); 1088 : - 1089 7205 : return status; + 1089 6773 : return status; 1090 : } 1091 : 1092 : /** diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html index 8f97bd306..df41abb6b 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 97.5 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % @@ -65,28 +65,28 @@ _ml_tensors_info_copy_from_ml - 6920 + 6488 _ml_tensors_info_create_from_gst - 13868 + 13001 _ml_tensors_info_copy_from_gst - 13916 + 13049 gst_info_is_extended - 27780 + 26046 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html index 5fd708eb3..587427f25 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 97.5 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % @@ -65,28 +65,28 @@ _ml_tensors_info_copy_from_gst - 13916 + 13049 _ml_tensors_info_copy_from_ml - 6920 + 6488 _ml_tensors_info_create_from_gst - 13868 + 13001 gst_info_is_extended - 27780 + 26046 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html index aae16a543..b0c82a728 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html +++ 
b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 97.5 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % @@ -83,54 +83,54 @@ 21 : * @brief Check tensor-info has extended rank value. 22 : */ 23 : static gboolean - 24 27780 : gst_info_is_extended (const GstTensorsInfo * gst_info) + 24 26046 : gst_info_is_extended (const GstTensorsInfo * gst_info) 25 : { 26 : GstTensorInfo *_info; 27 : guint i; 28 : - 29 56862 : for (i = 0; i < gst_info->num_tensors; i++) { - 30 29106 : _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i); - 31 29106 : if (!_info) + 29 53394 : for (i = 0; i < gst_info->num_tensors; i++) { + 30 27372 : _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i); + 31 27372 : if (!_info) 32 0 : _ml_error_report_return (FALSE, 33 : "The parameter, gst_info, has invalid number of tensors. The max number of tensors is " 34 : NNS_TENSOR_SIZE_LIMIT_STR); 35 : - 36 29106 : if (_info->dimension[ML_TENSOR_RANK_LIMIT_PREV] > 0) + 36 27372 : if (_info->dimension[ML_TENSOR_RANK_LIMIT_PREV] > 0) 37 24 : return TRUE; 38 : } 39 : - 40 27756 : return FALSE; + 40 26022 : return FALSE; 41 : } 42 : 43 : /** 44 : * @brief Allocates a tensors information handle from gst info. 45 : */ 46 : int - 47 13868 : _ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info, + 47 13001 : _ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info, 48 : GstTensorsInfo * gst_info) 49 : { 50 : gboolean is_extended; 51 : - 52 13868 : if (!ml_info) + 52 13001 : if (!ml_info) 53 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 54 : "The parameter, ml_info, is NULL. It should be a valid ml_tensors_info_h instance usually created by ml_tensors_info_create(). This could be an internal bug of ML API."); 55 : - 56 13867 : if (!gst_info) + 56 13000 : if (!gst_info) 57 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 58 : "The parameter, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This could be an internal bug of ML API."); 59 : - 60 13866 : is_extended = gst_info_is_extended (gst_info); - 61 13866 : if (is_extended) + 60 12999 : is_extended = gst_info_is_extended (gst_info); + 61 12999 : if (is_extended) 62 7 : _ml_error_report_return_continue_iferr 63 : (ml_tensors_info_create_extended (ml_info), 64 : "The call to ml_tensors_info_create_extended has failed with %d.", 65 : _ERRNO); 66 : else - 67 13859 : _ml_error_report_return_continue_iferr (ml_tensors_info_create (ml_info), + 67 12992 : _ml_error_report_return_continue_iferr (ml_tensors_info_create (ml_info), 68 : "The call to ml_tensors_info_create has failed with %d.", _ERRNO); 69 : - 70 13866 : _ml_tensors_info_copy_from_gst (*ml_info, gst_info); - 71 13866 : return ML_ERROR_NONE; + 70 12999 : _ml_tensors_info_copy_from_gst (*ml_info, gst_info); + 71 12999 : return ML_ERROR_NONE; 72 : } 73 : 74 : /** @@ -138,26 +138,26 @@ 76 : * @bug Thread safety required. Check its internal users first! 
77 : */ 78 : int - 79 13916 : _ml_tensors_info_copy_from_gst (ml_tensors_info_h ml_info, + 79 13049 : _ml_tensors_info_copy_from_gst (ml_tensors_info_h ml_info, 80 : const GstTensorsInfo * gst_info) 81 : { 82 : ml_tensors_info_s *_info; 83 : - 84 13916 : if (!ml_info) + 84 13049 : if (!ml_info) 85 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 86 : "The parmater, ml_info, is NULL. It should be a valid ml_tensors_info_s instance, usually created by ml_tensors_info_create(). This is probably an internal bug of ML API."); - 87 13915 : if (!gst_info) + 87 13048 : if (!gst_info) 88 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 89 : "The parmater, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This is probably an internal bug of ML API."); 90 : - 91 13914 : _info = (ml_tensors_info_s *) ml_info; + 91 13047 : _info = (ml_tensors_info_s *) ml_info; 92 : - 93 13914 : G_LOCK_UNLESS_NOLOCK (*_info); - 94 13914 : _info->is_extended = gst_info_is_extended (gst_info); - 95 13914 : gst_tensors_info_copy (&_info->info, gst_info); - 96 13914 : G_UNLOCK_UNLESS_NOLOCK (*_info); + 93 13047 : G_LOCK_UNLESS_NOLOCK (*_info); + 94 13047 : _info->is_extended = gst_info_is_extended (gst_info); + 95 13047 : gst_tensors_info_copy (&_info->info, gst_info); + 96 13047 : G_UNLOCK_UNLESS_NOLOCK (*_info); 97 : - 98 13914 : return ML_ERROR_NONE; + 98 13047 : return ML_ERROR_NONE; 99 : } 100 : 101 : /** @@ -165,25 +165,25 @@ 103 : * @bug Thread safety required. Check its internal users first! 104 : */ 105 : int - 106 6920 : _ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info, + 106 6488 : _ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info, 107 : const ml_tensors_info_h ml_info) 108 : { 109 : ml_tensors_info_s *_info; 110 : - 111 6920 : if (!ml_info) + 111 6488 : if (!ml_info) 112 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 113 : "The parmater, ml_info, is NULL. It should be a valid ml_tensors_info_s instance, usually created by ml_tensors_info_create(). This is probably an internal bug of ML API."); - 114 6919 : if (!gst_info) + 114 6487 : if (!gst_info) 115 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, 116 : "The parmater, gst_info, is NULL. It should be a valid GstTensorsInfo instance. 
This is probably an internal bug of ML API."); 117 : - 118 6918 : _info = (ml_tensors_info_s *) ml_info; + 118 6486 : _info = (ml_tensors_info_s *) ml_info; 119 : - 120 6918 : G_LOCK_UNLESS_NOLOCK (*_info); - 121 6918 : gst_tensors_info_copy (gst_info, &_info->info); - 122 6918 : G_UNLOCK_UNLESS_NOLOCK (*_info); + 120 6486 : G_LOCK_UNLESS_NOLOCK (*_info); + 121 6486 : gst_tensors_info_copy (gst_info, &_info->info); + 122 6486 : G_UNLOCK_UNLESS_NOLOCK (*_info); 123 : - 124 6918 : return ML_ERROR_NONE; + 124 6486 : return ML_ERROR_NONE; 125 : } diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html index 5d8389660..22afb7415 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 82.6 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 94.0 % @@ -604,49 +604,49 @@ _pipe_src_cb_need_data - 6646 + 6214 get_app_src_callback - 6646 + 6214 ml_pipeline_src_get_tensors_info - 6650 + 6218 ml_pipeline_src_input_data - 6734 + 6302 cb_sink_event - 6763 + 6328 ml_pipeline_src_parse_tensors_info - 13426 + 12562 get_tensors_info_from_caps - 13454 + 12590 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html index d8c326e0d..02c7c32eb 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 82.6 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 94.0 % @@ -100,7 +100,7 @@ _pipe_src_cb_need_data - 6646 + 6214 @@ -128,7 +128,7 @@ cb_sink_event - 6763 + 6328 @@ -184,7 +184,7 @@ get_app_src_callback - 6646 + 6214 @@ -198,7 +198,7 @@ get_tensors_info_from_caps - 13454 + 12590 @@ -492,21 +492,21 @@ ml_pipeline_src_get_tensors_info - 6650 + 6218 ml_pipeline_src_input_data - 6734 + 6302 ml_pipeline_src_parse_tensors_info - 13426 + 12562 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html 
b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html index a137e4d72..42d35979a 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 82.6 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 94.0 % @@ -315,69 +315,69 @@ 253 : * @brief Internal function to get the tensors info from the element caps. 254 : */ 255 : static gboolean - 256 13454 : get_tensors_info_from_caps (GstCaps * caps, GstTensorsInfo * info, + 256 12590 : get_tensors_info_from_caps (GstCaps * caps, GstTensorsInfo * info, 257 : gboolean * is_flexible) 258 : { 259 : GstStructure *s; 260 : GstTensorsConfig config; 261 : guint i, n_caps; - 262 13454 : gboolean found = FALSE; + 262 12590 : gboolean found = FALSE; 263 : - 264 13454 : n_caps = gst_caps_get_size (caps); + 264 12590 : n_caps = gst_caps_get_size (caps); 265 : - 266 13461 : for (i = 0; i < n_caps; i++) { - 267 13455 : s = gst_caps_get_structure (caps, i); - 268 13455 : found = gst_tensors_config_from_structure (&config, s); + 266 12597 : for (i = 0; i < n_caps; i++) { + 267 12591 : s = gst_caps_get_structure (caps, i); + 268 12591 : found = gst_tensors_config_from_structure (&config, s); 269 : - 270 13455 : if (found) { - 271 13448 : gst_tensors_info_free (info); - 272 13448 : gst_tensors_info_copy (info, &config.info); - 273 13448 : *is_flexible = gst_tensors_config_is_flexible (&config); + 270 12591 : if (found) { + 271 12584 : gst_tensors_info_free (info); + 272 12584 : gst_tensors_info_copy (info, &config.info); + 273 12584 : *is_flexible = gst_tensors_config_is_flexible (&config); 274 : } 275 : - 276 13455 : gst_tensors_config_free (&config); - 277 13455 : if (found) - 278 13448 : break; + 276 12591 : gst_tensors_config_free (&config); + 277 12591 : if (found) + 278 12584 : break; 279 : } 280 : - 281 13454 : return found; + 281 12590 : return found; 282 : } 283 : 284 : /** 285 : * @brief Handle a sink element for registered ml_pipeline_sink_cb 286 : */ 287 : static void - 288 6763 : cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) + 288 6328 : cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) 289 : { - 290 6763 : ml_pipeline_element *elem = user_data; + 290 6328 : ml_pipeline_element *elem = user_data; 291 : 292 : /** @todo CRITICAL if the pipeline is being killed, don't proceed! 
*/ 293 : GstMemory *mem[ML_TENSOR_SIZE_LIMIT]; 294 : GstMapInfo map[ML_TENSOR_SIZE_LIMIT]; 295 : guint i, num_tensors; 296 : GList *l; - 297 6763 : ml_tensors_data_s *_data = NULL; + 297 6328 : ml_tensors_data_s *_data = NULL; 298 : GstTensorsInfo gst_info; 299 : int status; 300 : - 301 6763 : gst_tensors_info_init (&gst_info); - 302 6763 : gst_info.num_tensors = num_tensors = gst_tensor_buffer_get_count (b); + 301 6328 : gst_tensors_info_init (&gst_info); + 302 6328 : gst_info.num_tensors = num_tensors = gst_tensor_buffer_get_count (b); 303 : 304 : /* Set tensor data. The handle for tensors-info in data should be added. */ 305 : status = - 306 6763 : _ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data); - 307 6763 : if (status != ML_ERROR_NONE) { + 306 6328 : _ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data); + 307 6328 : if (status != ML_ERROR_NONE) { 308 0 : _ml_loge (_ml_detail 309 : ("Failed to allocate memory for tensors data in sink callback, which is registered by ml_pipeline_sink_register ().")); - 310 6763 : return; + 310 6328 : return; 311 : } 312 : - 313 6763 : g_mutex_lock (&elem->lock); + 313 6328 : g_mutex_lock (&elem->lock); 314 : - 315 6763 : _data->num_tensors = num_tensors; - 316 14003 : for (i = 0; i < num_tensors; i++) { - 317 7240 : mem[i] = gst_tensor_buffer_get_nth_memory (b, i); - 318 7240 : if (!gst_memory_map (mem[i], &map[i], GST_MAP_READ)) { + 315 6328 : _data->num_tensors = num_tensors; + 316 13133 : for (i = 0; i < num_tensors; i++) { + 317 6805 : mem[i] = gst_tensor_buffer_get_nth_memory (b, i); + 318 6805 : if (!gst_memory_map (mem[i], &map[i], GST_MAP_READ)) { 319 0 : _ml_loge (_ml_detail 320 : ("Failed to map the output in sink '%s' callback, which is registered by ml_pipeline_sink_register ()", 321 : elem->name)); @@ -386,12 +386,12 @@ 324 0 : goto error; 325 : } 326 : - 327 7240 : _data->tensors[i].data = map[i].data; - 328 7240 : _data->tensors[i].size = map[i].size; + 327 6805 : _data->tensors[i].data = map[i].data; + 328 6805 : _data->tensors[i].size = map[i].size; 329 : } 330 : 331 : /** @todo This assumes that padcap is static */ - 332 6763 : if (elem->sink == NULL) { + 332 6328 : if (elem->sink == NULL) { 333 28 : gboolean found = FALSE; 334 28 : gboolean flexible = FALSE; 335 : @@ -423,7 +423,7 @@ 361 : } 362 : 363 : /* Prepare output and set data. */ - 364 6763 : if (elem->is_flexible_tensor) { + 364 6328 : if (elem->is_flexible_tensor) { 365 : GstTensorMetaInfo meta; 366 : gsize hsize; 367 : @@ -439,10 +439,10 @@ 377 9 : _data->tensors[i].size = map[i].size - hsize; 378 : } 379 : } else { - 380 6760 : gst_tensors_info_copy (&gst_info, &elem->tensors_info); + 380 6325 : gst_tensors_info_copy (&gst_info, &elem->tensors_info); 381 : 382 : /* Compare output info and buffer if gst-buffer is not flexible. */ - 383 6760 : if (gst_info.num_tensors != num_tensors) { + 383 6325 : if (gst_info.num_tensors != num_tensors) { 384 0 : _ml_loge (_ml_detail 385 : ("The sink event of [%s] cannot be handled because the number of tensors mismatches.", 386 : elem->name)); @@ -452,15 +452,15 @@ 390 0 : goto error; 391 : } 392 : - 393 13991 : for (i = 0; i < num_tensors; i++) { - 394 7231 : size_t sz = gst_tensors_info_get_size (&gst_info, i); + 393 13121 : for (i = 0; i < num_tensors; i++) { + 394 6796 : size_t sz = gst_tensors_info_get_size (&gst_info, i); 395 : 396 : /* Not configured, yet. 
*/ - 397 7231 : if (sz == 0) + 397 6796 : if (sz == 0) 398 0 : _ml_loge (_ml_detail 399 : ("The caps for sink(%s) is not configured.", elem->name)); 400 : - 401 7231 : if (sz != map[i].size) { + 401 6796 : if (sz != map[i].size) { 402 0 : _ml_loge (_ml_detail 403 : ("The sink event of [%s] cannot be handled because the tensor dimension mismatches.", 404 : elem->name)); @@ -473,35 +473,35 @@ 411 : } 412 : 413 : /* Create new output info, data handle should be updated here. */ - 414 6763 : _ml_tensors_info_create_from_gst (&_data->info, &gst_info); + 414 6328 : _ml_tensors_info_create_from_gst (&_data->info, &gst_info); 415 : 416 : /* Iterate e->handles, pass the data to them */ - 417 13532 : for (l = elem->handles; l != NULL; l = l->next) { + 417 12662 : for (l = elem->handles; l != NULL; l = l->next) { 418 : ml_pipeline_sink_cb callback; - 419 6769 : ml_pipeline_common_elem *sink = l->data; - 420 6769 : if (sink->callback_info == NULL) + 419 6334 : ml_pipeline_common_elem *sink = l->data; + 420 6334 : if (sink->callback_info == NULL) 421 3 : continue; 422 : - 423 6766 : callback = sink->callback_info->sink_cb; - 424 6766 : if (callback) - 425 6766 : callback (_data, _data->info, sink->callback_info->sink_pdata); + 423 6331 : callback = sink->callback_info->sink_cb; + 424 6331 : if (callback) + 425 6331 : callback (_data, _data->info, sink->callback_info->sink_pdata); 426 : 427 : /** @todo Measure time. Warn if it takes long. Kill if it takes too long. */ 428 : } 429 : - 430 6763 : error: - 431 6763 : g_mutex_unlock (&elem->lock); + 430 6328 : error: + 431 6328 : g_mutex_unlock (&elem->lock); 432 : - 433 14003 : for (i = 0; i < num_tensors; i++) { - 434 7240 : gst_memory_unmap (mem[i], &map[i]); - 435 7240 : gst_memory_unref (mem[i]); + 433 13133 : for (i = 0; i < num_tensors; i++) { + 434 6805 : gst_memory_unmap (mem[i], &map[i]); + 435 6805 : gst_memory_unref (mem[i]); 436 : } 437 : - 438 6763 : _ml_tensors_data_destroy_internal (_data, FALSE); - 439 6763 : _data = NULL; + 438 6328 : _ml_tensors_data_destroy_internal (_data, FALSE); + 439 6328 : _data = NULL; 440 : - 441 6763 : gst_tensors_info_free (&gst_info); - 442 6763 : return; + 441 6328 : gst_tensors_info_free (&gst_info); + 442 6328 : return; 443 : } 444 : 445 : /** @@ -1559,29 +1559,29 @@ 1497 : * @brief Parse tensors info of src element. 1498 : */ 1499 : static int - 1500 13426 : ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem) + 1500 12562 : ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem) 1501 : { - 1502 13426 : GstCaps *caps = NULL; - 1503 13426 : gboolean found = FALSE, flexible = FALSE; + 1502 12562 : GstCaps *caps = NULL; + 1503 12562 : gboolean found = FALSE, flexible = FALSE; 1504 : - 1505 13426 : if (elem->src == NULL) { + 1505 12562 : if (elem->src == NULL) { 1506 41 : elem->src = gst_element_get_static_pad (elem->element, "src"); 1507 : } 1508 : - 1509 13426 : if (elem->src == NULL) { + 1509 12562 : if (elem->src == NULL) { 1510 0 : _ml_error_report 1511 : ("Failed to get the src pad of the element[%s]. The designated source element does not have available src pad? For the detail, please check the GStreamer log messages.", 1512 : elem->name); - 1513 13426 : return ML_ERROR_STREAMS_PIPE; + 1513 12562 : return ML_ERROR_STREAMS_PIPE; 1514 : } 1515 : 1516 : /* If caps is given, use it. e.g. Use cap "image/png" when the pipeline is */ 1517 : /* given as "appsrc caps=image/png ! pngdec ! ... 
" */ - 1518 13426 : caps = gst_pad_get_current_caps (elem->src); - 1519 13426 : if (!caps) - 1520 13362 : caps = gst_pad_get_allowed_caps (elem->src); + 1518 12562 : caps = gst_pad_get_current_caps (elem->src); + 1519 12562 : if (!caps) + 1520 12498 : caps = gst_pad_get_allowed_caps (elem->src); 1521 : - 1522 13426 : if (!caps) { + 1522 12562 : if (!caps) { 1523 0 : _ml_logw 1524 : ("Cannot find caps. The pipeline is not yet negotiated for src element [%s].", 1525 : elem->name); @@ -1590,10 +1590,10 @@ 1528 0 : return ML_ERROR_TRY_AGAIN; 1529 : } 1530 : - 1531 13426 : found = get_tensors_info_from_caps (caps, &elem->tensors_info, &flexible); + 1531 12562 : found = get_tensors_info_from_caps (caps, &elem->tensors_info, &flexible); 1532 : - 1533 13426 : if (found) { - 1534 13420 : elem->is_flexible_tensor = flexible; + 1533 12562 : if (found) { + 1534 12556 : elem->is_flexible_tensor = flexible; 1535 : } else { 1536 6 : if (gst_caps_is_fixed (caps)) { 1537 5 : GstStructure *st = gst_caps_get_structure (caps, 0); @@ -1601,8 +1601,8 @@ 1539 : } 1540 : } 1541 : - 1542 13426 : gst_caps_unref (caps); - 1543 13426 : return ML_ERROR_NONE; + 1542 12562 : gst_caps_unref (caps); + 1543 12562 : return ML_ERROR_NONE; 1544 : } 1545 : 1546 : /** @@ -1698,7 +1698,7 @@ 1636 : * @brief Push a data frame to a src (more info in nnstreamer.h) 1637 : */ 1638 : int - 1639 6734 : ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, + 1639 6302 : ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, 1640 : ml_pipeline_buf_policy_e policy) 1641 : { 1642 : GstBuffer *buffer; @@ -1710,18 +1710,18 @@ 1648 : ml_tensors_data_s *_data; 1649 : unsigned int i; 1650 : - 1651 13468 : handle_init (src, h); + 1651 12604 : handle_init (src, h); 1652 : - 1653 6734 : _data = (ml_tensors_data_s *) data; - 1654 6734 : if (!_data) { + 1653 6302 : _data = (ml_tensors_data_s *) data; + 1654 6302 : if (!_data) { 1655 1 : _ml_error_report 1656 : ("The given parameter, data (ml_tensors_data_h), is NULL. It should be a valid ml_tensor_data_h instance, which is usually created by ml_tensors_data_create()."); 1657 1 : ret = ML_ERROR_INVALID_PARAMETER; 1658 1 : goto unlock_return; 1659 : } - 1660 6733 : G_LOCK_UNLESS_NOLOCK (*_data); + 1660 6301 : G_LOCK_UNLESS_NOLOCK (*_data); 1661 : - 1662 6733 : if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) { + 1662 6301 : if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) { 1663 0 : _ml_error_report 1664 : ("The number of tensors of the given data (ml_tensors_data_h) is invalid. The number of tensors of data is %u. It should be between 1 and %u.", 1665 : _data->num_tensors, ML_TENSOR_SIZE_LIMIT); @@ -1729,9 +1729,9 @@ 1667 0 : goto dont_destroy_data; 1668 : } 1669 : - 1670 6733 : ret = ml_pipeline_src_parse_tensors_info (elem); + 1670 6301 : ret = ml_pipeline_src_parse_tensors_info (elem); 1671 : - 1672 6733 : if (ret != ML_ERROR_NONE) { + 1672 6301 : if (ret != ML_ERROR_NONE) { 1673 0 : if (ret == ML_ERROR_TRY_AGAIN) 1674 0 : _ml_error_report_continue 1675 : ("The pipeline is not ready to accept input streams. 
The input is ignored."); @@ -1741,8 +1741,8 @@ 1679 0 : goto dont_destroy_data; 1680 : } 1681 : - 1682 6733 : if (!elem->is_media_stream && !elem->is_flexible_tensor) { - 1683 6726 : if (elem->tensors_info.num_tensors != _data->num_tensors) { + 1682 6301 : if (!elem->is_media_stream && !elem->is_flexible_tensor) { + 1683 6294 : if (elem->tensors_info.num_tensors != _data->num_tensors) { 1684 0 : _ml_error_report 1685 : ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u", 1686 : elem->name, elem->tensors_info.num_tensors, _data->num_tensors); @@ -1751,10 +1751,10 @@ 1689 0 : goto dont_destroy_data; 1690 : } 1691 : - 1692 13605 : for (i = 0; i < _data->num_tensors; i++) { - 1693 6881 : size_t sz = gst_tensors_info_get_size (&elem->tensors_info, i); + 1692 12741 : for (i = 0; i < _data->num_tensors; i++) { + 1693 6449 : size_t sz = gst_tensors_info_get_size (&elem->tensors_info, i); 1694 : - 1695 6881 : if (sz != _data->tensors[i].size) { + 1695 6449 : if (sz != _data->tensors[i].size) { 1696 2 : _ml_error_report 1697 : ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)", 1698 : i, _data->tensors[i].size, sz); @@ -1766,21 +1766,21 @@ 1704 : } 1705 : 1706 : /* Create buffer to be pushed from buf[] */ - 1707 6731 : buffer = gst_buffer_new (); - 1708 6731 : _ml_tensors_info_copy_from_ml (&gst_info, _data->info); + 1707 6299 : buffer = gst_buffer_new (); + 1708 6299 : _ml_tensors_info_copy_from_ml (&gst_info, _data->info); 1709 : - 1710 13623 : for (i = 0; i < _data->num_tensors; i++) { + 1710 12759 : for (i = 0; i < _data->num_tensors; i++) { 1711 : GstTensorInfo *_gst_tensor_info = - 1712 6892 : gst_tensors_info_get_nth_info (&gst_info, i); - 1713 6892 : mem_data = _data->tensors[i].data; - 1714 6892 : mem_size = _data->tensors[i].size; + 1712 6460 : gst_tensors_info_get_nth_info (&gst_info, i); + 1713 6460 : mem_data = _data->tensors[i].data; + 1714 6460 : mem_size = _data->tensors[i].size; 1715 : - 1716 6892 : mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, + 1716 6460 : mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, 1717 : mem_data, mem_size, 0, mem_size, mem_data, 1718 : (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) ? g_free : NULL); 1719 : 1720 : /* flex tensor, append header. */ - 1721 6892 : if (elem->is_flexible_tensor) { + 1721 6460 : if (elem->is_flexible_tensor) { 1722 : GstTensorMetaInfo meta; 1723 : 1724 9 : gst_tensor_info_convert_to_meta (_gst_tensor_info, &meta); @@ -1789,76 +1789,76 @@ 1727 9 : gst_memory_unref (tmp); 1728 : } 1729 : - 1730 6892 : gst_tensor_buffer_append_memory (buffer, mem, _gst_tensor_info); + 1730 6460 : gst_tensor_buffer_append_memory (buffer, mem, _gst_tensor_info); 1731 : /** @todo Verify that gst_buffer_append lists tensors/gstmem in the correct order */ 1732 : } 1733 : - 1734 6731 : gst_tensors_info_free (&gst_info); + 1734 6299 : gst_tensors_info_free (&gst_info); 1735 : 1736 : /* Unlock if it's not auto-free. We do not know when it'll be freed. */ - 1737 6731 : if (policy != ML_PIPELINE_BUF_POLICY_AUTO_FREE) + 1737 6299 : if (policy != ML_PIPELINE_BUF_POLICY_AUTO_FREE) 1738 55 : G_UNLOCK_UNLESS_NOLOCK (*_data); 1739 : 1740 : /* Push the data! 
*/ - 1741 6731 : gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer); + 1741 6299 : gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer); 1742 : 1743 : /* Free data ptr if buffer policy is auto-free */ - 1744 6731 : if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) { - 1745 6676 : G_UNLOCK_UNLESS_NOLOCK (*_data); - 1746 6676 : _ml_tensors_data_destroy_internal (_data, FALSE); - 1747 6676 : _data = NULL; + 1744 6299 : if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) { + 1745 6244 : G_UNLOCK_UNLESS_NOLOCK (*_data); + 1746 6244 : _ml_tensors_data_destroy_internal (_data, FALSE); + 1747 6244 : _data = NULL; 1748 : } 1749 : - 1750 6731 : if (gret == GST_FLOW_FLUSHING) { + 1750 6299 : if (gret == GST_FLOW_FLUSHING) { 1751 0 : _ml_logw 1752 : ("The pipeline is not in PAUSED/PLAYING. The input may be ignored."); 1753 0 : ret = ML_ERROR_TRY_AGAIN; - 1754 6731 : } else if (gret == GST_FLOW_EOS) { + 1754 6299 : } else if (gret == GST_FLOW_EOS) { 1755 0 : _ml_logw ("THe pipeline is in EOS state. The input is ignored."); 1756 0 : ret = ML_ERROR_STREAMS_PIPE; 1757 : } 1758 : - 1759 6731 : goto unlock_return; + 1759 6299 : goto unlock_return; 1760 : 1761 2 : dont_destroy_data: 1762 2 : G_UNLOCK_UNLESS_NOLOCK (*_data); 1763 : - 1764 6734 : handle_exit (h); + 1764 6302 : handle_exit (h); 1765 : } 1766 : 1767 : /** 1768 : * @brief Internal function to fetch ml_pipeline_src_callbacks_s pointer 1769 : */ 1770 : static ml_pipeline_src_callbacks_s * - 1771 6646 : get_app_src_callback (ml_pipeline_common_elem * src_h, void **data) + 1771 6214 : get_app_src_callback (ml_pipeline_common_elem * src_h, void **data) 1772 : { - 1773 6646 : ml_pipeline_src_callbacks_s *src_cb = NULL; + 1773 6214 : ml_pipeline_src_callbacks_s *src_cb = NULL; 1774 : - 1775 6646 : if (src_h->callback_info) { - 1776 6646 : src_cb = &src_h->callback_info->src_cb; - 1777 6646 : *data = src_h->callback_info->src_pdata; + 1775 6214 : if (src_h->callback_info) { + 1776 6214 : src_cb = &src_h->callback_info->src_cb; + 1777 6214 : *data = src_h->callback_info->src_pdata; 1778 : } 1779 : - 1780 6646 : return src_cb; + 1780 6214 : return src_cb; 1781 : } 1782 : 1783 : /** 1784 : * @brief Internal function for appsrc callback - need_data. 1785 : */ 1786 : static void - 1787 6646 : _pipe_src_cb_need_data (GstAppSrc * src, guint length, gpointer user_data) + 1787 6214 : _pipe_src_cb_need_data (GstAppSrc * src, guint length, gpointer user_data) 1788 : { 1789 : ml_pipeline_common_elem *src_h; - 1790 6646 : ml_pipeline_src_callbacks_s *src_cb = NULL; - 1791 6646 : void *pdata = NULL; + 1790 6214 : ml_pipeline_src_callbacks_s *src_cb = NULL; + 1791 6214 : void *pdata = NULL; 1792 : - 1793 6646 : src_h = (ml_pipeline_common_elem *) user_data; - 1794 6646 : if (!src_h) + 1793 6214 : src_h = (ml_pipeline_common_elem *) user_data; + 1794 6214 : if (!src_h) 1795 0 : return; 1796 : - 1797 6646 : src_cb = get_app_src_callback (src_h, &pdata); - 1798 6646 : if (src_cb && src_cb->need_data) - 1799 6646 : src_cb->need_data (src_h, length, pdata); + 1797 6214 : src_cb = get_app_src_callback (src_h, &pdata); + 1798 6214 : if (src_cb && src_cb->need_data) + 1799 6214 : src_cb->need_data (src_h, length, pdata); 1800 : } 1801 : 1802 : /** @@ -1944,27 +1944,27 @@ 1882 : * @brief Gets a handle for the tensors metadata of given src node. 
1883 : */ 1884 : int - 1885 6650 : ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h * info) + 1885 6218 : ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h * info) 1886 : { - 1887 6650 : handle_init (src, h); + 1887 6218 : handle_init (src, h); 1888 : - 1889 6650 : if (info == NULL) { + 1889 6218 : if (info == NULL) { 1890 0 : _ml_error_report 1891 : ("The parameter, info (ml_tensors_info_h *), is NULL. It should be a valid pointer to a ml_tensors_info_h instance, which is usually created by ml_tensors_info_create()."); 1892 0 : ret = ML_ERROR_INVALID_PARAMETER; 1893 0 : goto unlock_return; 1894 : } 1895 : - 1896 6650 : ret = ml_pipeline_src_parse_tensors_info (elem); + 1896 6218 : ret = ml_pipeline_src_parse_tensors_info (elem); 1897 : - 1898 6650 : if (ret == ML_ERROR_NONE) { - 1899 6650 : ret = _ml_tensors_info_create_from_gst (info, &elem->tensors_info); + 1898 6218 : if (ret == ML_ERROR_NONE) { + 1899 6218 : ret = _ml_tensors_info_create_from_gst (info, &elem->tensors_info); 1900 : } else { 1901 0 : _ml_error_report_continue 1902 : ("ml_pipeline_src_parse_tensors_info () has returned error; it cannot fetch input tensor info (metadata of input stream) for the given ml_pipeline_src_h handle (h). ml_pipeline_src_get_tensors_info () cannot continue."); 1903 : } 1904 : - 1905 6650 : handle_exit (h); + 1905 6218 : handle_exit (h); 1906 : } 1907 : 1908 : /**************************************************** diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html index 05c986873..b30da22c5 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 75.9 % + 75.1 % 855 - 649 + 642 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 94.9 % + 92.3 % 39 - 37 + 36 @@ -63,30 +63,30 @@ - ml_single_destroy_notify_cb + __destroy_notify 0 - ml_single_set_inout_tensors_info + ml_single_destroy_notify_cb 0 - ml_single_invoke_fast + ml_single_set_inout_tensors_info - 1 + 0 - __destroy_notify + ml_single_invoke_fast - 2 + 1 @@ -217,16 +217,16 @@ - __invoke + set_destroy_notify 86 - set_destroy_notify + __invoke - 86 + 88 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html index ea7e45614..4e5c70d66 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - 
capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 75.9 % + 75.1 % 855 - 649 + 642 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 94.9 % + 92.3 % 39 - 37 + 36 @@ -65,14 +65,14 @@ __destroy_notify - 2 + 0 __invoke - 86 + 88 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html index 8841a4049..8ccdb1320 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 75.9 % + 75.1 % 855 - 649 + 642 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 94.9 % + 92.3 % 39 - 37 + 36 @@ -397,29 +397,29 @@ 335 : * @brief To call the framework to destroy the allocated output data 336 : */ 337 : static inline void - 338 2 : __destroy_notify (gpointer data_h, gpointer single_data) + 338 0 : __destroy_notify (gpointer data_h, gpointer single_data) 339 : { 340 : ml_single *single_h; 341 : ml_tensors_data_s *data; 342 : - 343 2 : data = (ml_tensors_data_s *) data_h; - 344 2 : single_h = (ml_single *) single_data; + 343 0 : data = (ml_tensors_data_s *) data_h; + 344 0 : single_h = (ml_single *) single_data; 345 : - 346 2 : if (G_LIKELY (single_h->filter)) { - 347 2 : if (single_h->klass->allocate_in_invoke (single_h->filter)) { - 348 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors); + 346 0 : if (G_LIKELY (single_h->filter)) { + 347 0 : if (single_h->klass->allocate_in_invoke (single_h->filter)) { + 348 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors); 349 : } 350 : } 351 : 352 : /* reset callback function */ - 353 2 : data->destroy = NULL; - 354 2 : } + 353 0 : data->destroy = NULL; + 354 0 : } 355 : 356 : /** 357 : * @brief Wrapper function for __destroy_notify 358 : */ 359 : static int - 360 0 : ml_single_destroy_notify_cb (void *handle, void *user_data) + 360 0 : ml_single_destroy_notify_cb (void *handle, void *user_data) 361 : { 362 0 : ml_tensors_data_h data = (ml_tensors_data_h) handle; 363 0 : ml_single_h single = (ml_single_h) user_data; @@ -478,24 +478,24 @@ 416 : * @brief Internal function to call subplugin's invoke 417 : */ 418 : static inline int - 419 86 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out, + 419 88 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out, 420 : gboolean alloc_output) 421 : { 422 : ml_tensors_data_s *in_data, *out_data; - 423 86 : int status = ML_ERROR_NONE; + 423 88 : 
int status = ML_ERROR_NONE; 424 : - 425 86 : in_data = (ml_tensors_data_s *) in; - 426 86 : out_data = (ml_tensors_data_s *) out; + 425 88 : in_data = (ml_tensors_data_s *) in; + 426 88 : out_data = (ml_tensors_data_s *) out; 427 : 428 : /* Prevent error case when input or output is null in invoke thread. */ - 429 86 : if (!in_data || !out_data) { + 429 88 : if (!in_data || !out_data) { 430 0 : _ml_error_report ("Failed to invoke a model, invalid data handle."); 431 0 : return ML_ERROR_STREAMS_PIPE; 432 : } 433 : 434 : /* Invoke the thread. */ - 435 86 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors, - 436 86 : out_data->tensors, alloc_output)) { + 435 88 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors, + 436 88 : out_data->tensors, alloc_output)) { 437 0 : const char *fw_name = _ml_get_nnfw_subplugin_name (single_h->nnfw); 438 0 : _ml_error_report 439 : ("Failed to invoke the tensors. The invoke callback of the tensor-filter subplugin '%s' has failed. Please contact the author of tensor-filter-%s (nnstreamer-%s) or review its source code. Note that this usually happens when the designated framework does not support the given model (e.g., trying to run tf-lite 2.6 model with tf-lite 1.13).", @@ -503,7 +503,7 @@ 441 0 : status = ML_ERROR_STREAMS_PIPE; 442 : } 443 : - 444 86 : return status; + 444 88 : return status; 445 : } 446 : 447 : /** @@ -563,35 +563,35 @@ 501 102 : int status = ML_ERROR_NONE; 502 : 503 : /** wait for data */ - 504 124 : while (single_h->state != RUNNING) { + 504 126 : while (single_h->state != RUNNING) { 505 102 : g_cond_wait (&single_h->cond, &single_h->mutex); 506 100 : if (single_h->state == JOIN_REQUESTED) - 507 78 : goto exit; + 507 76 : goto exit; 508 : } 509 : - 510 22 : input = single_h->input; - 511 22 : output = single_h->output; + 510 24 : input = single_h->input; + 511 24 : output = single_h->output; 512 : /* Set null to prevent double-free. */ - 513 22 : single_h->input = single_h->output = NULL; + 513 24 : single_h->input = single_h->output = NULL; 514 : - 515 22 : single_h->invoking = TRUE; - 516 22 : alloc_output = single_h->free_output; - 517 22 : g_mutex_unlock (&single_h->mutex); - 518 22 : status = __invoke (single_h, input, output, alloc_output); - 519 22 : g_mutex_lock (&single_h->mutex); + 515 24 : single_h->invoking = TRUE; + 516 24 : alloc_output = single_h->free_output; + 517 24 : g_mutex_unlock (&single_h->mutex); + 518 24 : status = __invoke (single_h, input, output, alloc_output); + 519 24 : g_mutex_lock (&single_h->mutex); 520 : /* Clear input data after invoke is done. 
*/ - 521 22 : ml_tensors_data_destroy (input); - 522 22 : single_h->invoking = FALSE; + 521 24 : ml_tensors_data_destroy (input); + 522 24 : single_h->invoking = FALSE; 523 : - 524 22 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) { - 525 2 : if (alloc_output) { - 526 2 : single_h->destroy_data_list = - 527 2 : g_list_remove (single_h->destroy_data_list, output); - 528 2 : ml_tensors_data_destroy (output); + 524 24 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) { + 525 4 : if (alloc_output) { + 526 4 : single_h->destroy_data_list = + 527 4 : g_list_remove (single_h->destroy_data_list, output); + 528 4 : ml_tensors_data_destroy (output); 529 : } 530 : - 531 2 : if (single_h->state == JOIN_REQUESTED) - 532 2 : goto exit; + 531 4 : if (single_h->state == JOIN_REQUESTED) + 532 4 : goto exit; 533 0 : goto wait_for_next; 534 : } 535 : @@ -1298,10 +1298,10 @@ 1236 80 : ML_SINGLE_HANDLE_UNLOCK (single_h); 1237 : 1238 : /** Wait until invoke process is finished */ - 1239 445 : while (invoking) { - 1240 365 : _ml_logd ("Wait 1 ms until invoke is finished and close the handle."); - 1241 365 : g_usleep (1000); - 1242 365 : invoking = single_h->invoking; + 1239 1047 : while (invoking) { + 1240 967 : _ml_logd ("Wait 1 ms until invoke is finished and close the handle."); + 1241 967 : g_usleep (1000); + 1242 967 : invoking = single_h->invoking; 1243 : /** 1244 : * single_h->invoking is the only protected value here and we are 1245 : * doing a read-only operation and do not need to project its value diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html index caf513876..587301d82 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 88.0 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html index b421a9a46..47c35994a 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 
gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 88.0 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html index cf2b0ec0b..2ed98ccef 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 88.0 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html index d6255281d..15b48e1c8 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 84.3 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html index 15da58b56..8ea2fcb08 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 84.3 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git 
a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html index beb3ddc49..572d1e917 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 84.3 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html index a15692510..bb051fcac 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 79.7 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % @@ -240,7 +240,7 @@ _mlrs_edge_event_cb - 54 + 55 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html index c6c3872c7..cbfb588c0 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 79.7 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % @@ -170,7 +170,7 @@ _mlrs_edge_event_cb - 54 + 55 diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html index 
749234fd9..b87d65bf5 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 79.7 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % @@ -613,17 +613,17 @@ 551 : * @brief Edge event callback. 552 : */ 553 : static int - 554 54 : _mlrs_edge_event_cb (nns_edge_event_h event_h, void *user_data) + 554 55 : _mlrs_edge_event_cb (nns_edge_event_h event_h, void *user_data) 555 : { - 556 54 : nns_edge_event_e event = NNS_EDGE_EVENT_UNKNOWN; - 557 54 : nns_edge_data_h data_h = NULL; - 558 54 : int ret = NNS_EDGE_ERROR_NONE; + 556 55 : nns_edge_event_e event = NNS_EDGE_EVENT_UNKNOWN; + 557 55 : nns_edge_data_h data_h = NULL; + 558 55 : int ret = NNS_EDGE_ERROR_NONE; 559 : - 560 54 : ret = nns_edge_event_get_type (event_h, &event); - 561 54 : if (NNS_EDGE_ERROR_NONE != ret) - 562 54 : return ret; + 560 55 : ret = nns_edge_event_get_type (event_h, &event); + 561 55 : if (NNS_EDGE_ERROR_NONE != ret) + 562 55 : return ret; 563 : - 564 54 : switch (event) { + 564 55 : switch (event) { 565 17 : case NNS_EDGE_EVENT_NEW_DATA_RECEIVED: 566 : { 567 17 : ret = nns_edge_event_parse_new_data (event_h, &data_h); @@ -633,14 +633,14 @@ 571 17 : ret = _mlrs_process_service_offloading (data_h, user_data); 572 17 : break; 573 : } - 574 37 : default: - 575 37 : break; + 574 38 : default: + 575 38 : break; 576 : } 577 : - 578 54 : if (data_h) + 578 55 : if (data_h) 579 17 : nns_edge_data_destroy (data_h); 580 : - 581 54 : return ret; + 581 55 : return ret; 582 : } 583 : 584 : /** diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html index 1217cc345..b12681281 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 83.0 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html index 5a8afcd11..a04af5d9e 100644 --- 
a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 83.0 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html index b24cc0f27..db4514108 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 83.0 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html index f87064cba..5e205530e 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 87.6 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html index 42bba0063..a959b98c6 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html +++ 
b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 87.6 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html index 037cc592f..57a1371f9 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 87.6 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html index a331e64f1..0bb6d7cdf 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 86.8 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html index 0a0fa36d2..a5f407e4d 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions + LCOV - ML API 1.8.6-0 
nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 86.8 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html index e434b75d2..4be6785be 100644 --- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html +++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c @@ -28,7 +28,7 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: 86.8 % @@ -37,7 +37,7 @@ Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: 100.0 % diff --git a/testresult/ml-api/cmd_line b/testresult/ml-api/cmd_line index 6b91f329c..4269fee06 100644 --- a/testresult/ml-api/cmd_line +++ b/testresult/ml-api/cmd_line @@ -1 +1 @@ -genhtml -o result unittest-filtered.info -t 'ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491' --ignore-errors source -p /home/abuild/rpmbuild/BUILD +genhtml -o result unittest-filtered.info -t 'ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0' --ignore-errors source -p /home/abuild/rpmbuild/BUILD diff --git a/testresult/ml-api/coverage_badge.svg b/testresult/ml-api/coverage_badge.svg index 39c62830e..6185bd92d 100644 --- a/testresult/ml-api/coverage_badge.svg +++ b/testresult/ml-api/coverage_badge.svg @@ -1 +1 @@ -coveragecoverage83.26%83.26% \ No newline at end of file +coveragecoverage83.12%83.12% \ No newline at end of file diff --git a/testresult/ml-api/index-sort-f.html b/testresult/ml-api/index-sort-f.html index e65d442f8..abf178cb8 100644 --- a/testresult/ml-api/index-sort-f.html +++ b/testresult/ml-api/index-sort-f.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 83.3 % + 83.1 % 5160 - 4296 + 4289 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 96.9 % + 96.6 % 294 - 285 + 284 @@ -82,14 +82,14 @@ capi-machine-learning-inference-1.8.6/c/src -
- 83.3 % + 83.1 % 5160 - 4296 - 96.9 % + 4289 + 96.6 % 294 - 285 + 284 diff --git a/testresult/ml-api/index-sort-l.html b/testresult/ml-api/index-sort-l.html index 709bf796d..9d263fd26 100644 --- a/testresult/ml-api/index-sort-l.html +++ b/testresult/ml-api/index-sort-l.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 83.3 % + 83.1 % 5160 - 4296 + 4289 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 96.9 % + 96.6 % 294 - 285 + 284 @@ -82,14 +82,14 @@ capi-machine-learning-inference-1.8.6/c/src -
- 83.3 % + 83.1 % 5160 - 4296 - 96.9 % + 4289 + 96.6 % 294 - 285 + 284 diff --git a/testresult/ml-api/index.html b/testresult/ml-api/index.html index 2864cecfb..e418c49f3 100644 --- a/testresult/ml-api/index.html +++ b/testresult/ml-api/index.html @@ -4,7 +4,7 @@ - LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 @@ -28,21 +28,21 @@ Test: - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 + ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 Lines: - 83.3 % + 83.1 % 5160 - 4296 + 4289 Test Date: - 2024-12-23 05:16:55 + 2024-12-23 05:24:11 Functions: - 96.9 % + 96.6 % 294 - 285 + 284 @@ -82,14 +82,14 @@ capi-machine-learning-inference-1.8.6/c/src -
- 83.3 % + 83.1 % 5160 - 4296 - 96.9 % + 4289 + 96.6 % 294 - 285 + 284