diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html
index faf470ae..21380627 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src
@@ -79,6 +79,18 @@
Total |
Hit |
+
+ ml-api-inference-single.c |
+
+
+ |
+ 75.1 % |
+ 855 |
+ 642 |
+ 92.3 % |
+ 39 |
+ 36 |
+
ml-api-inference-pipeline.c |
@@ -91,18 +103,6 @@
| 84 |
79 |
-
- ml-api-inference-single.c |
-
-
- |
- 75.9 % |
- 855 |
- 649 |
- 94.9 % |
- 39 |
- 37 |
-
ml-api-common.c |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html
index 75421839..9a1b44a2 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src
@@ -82,14 +82,14 @@
ml-api-inference-single.c |
-
+
|
- 75.9 % |
+ 75.1 % |
855 |
- 649 |
- 94.9 % |
+ 642 |
+ 92.3 % |
39 |
- 37 |
+ 36 |
ml-api-service-offloading.c |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html
index 9cc49dc9..04390e0a 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src
@@ -118,14 +118,14 @@
ml-api-inference-single.c |
-
+
|
- 75.9 % |
+ 75.1 % |
855 |
- 649 |
- 94.9 % |
+ 642 |
+ 92.3 % |
39 |
- 37 |
+ 36 |
ml-api-service-agent-client.c |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html
index 91c08c5d..eafcc808 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
@@ -317,7 +317,7 @@
ml_tensors_data_destroy |
- 483 |
+ 485 |
@@ -380,77 +380,77 @@
ml_tensors_data_create |
- 6850 |
+ 6651 |
ml_tensors_info_validate |
- 6944 |
+ 6745 |
_ml_tensors_info_create_from |
- 7120 |
+ 6921 |
ml_tensors_info_clone |
- 7133 |
+ 6934 |
_ml_tensors_data_destroy_internal |
- 13765 |
+ 13376 |
_ml_tensors_data_create_no_alloc |
- 13792 |
+ 13401 |
ml_tensors_info_destroy |
- 20959 |
+ 20371 |
ml_tensors_info_create |
- 20975 |
+ 20385 |
_ml_tensors_info_free |
- 20998 |
+ 20410 |
_ml_tensors_info_initialize |
- 21009 |
+ 20419 |
_ml_tensors_info_create_internal |
- 21010 |
+ 20420 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html
index ef75c4fd..df8c2b52 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
@@ -163,14 +163,14 @@
_ml_tensors_data_create_no_alloc |
- 13792 |
+ 13401 |
_ml_tensors_data_destroy_internal |
- 13765 |
+ 13376 |
@@ -184,28 +184,28 @@
_ml_tensors_info_create_from |
- 7120 |
+ 6921 |
_ml_tensors_info_create_internal |
- 21010 |
+ 20420 |
_ml_tensors_info_free |
- 20998 |
+ 20410 |
_ml_tensors_info_initialize |
- 21009 |
+ 20419 |
@@ -324,14 +324,14 @@
ml_tensors_data_create |
- 6850 |
+ 6651 |
ml_tensors_data_destroy |
- 483 |
+ 485 |
@@ -359,14 +359,14 @@
ml_tensors_info_clone |
- 7133 |
+ 6934 |
ml_tensors_info_create |
- 20975 |
+ 20385 |
@@ -380,7 +380,7 @@
ml_tensors_info_destroy |
- 20959 |
+ 20371 |
@@ -450,7 +450,7 @@
ml_tensors_info_validate |
- 6944 |
+ 6745 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html
index a542953b..07740bda 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c
@@ -184,61 +184,61 @@
122 : * @brief Internal function to create tensors-info handle.
123 : */
124 : static int
- 125 21010 : _ml_tensors_info_create_internal (ml_tensors_info_h * info, bool extended)
+ 125 20420 : _ml_tensors_info_create_internal (ml_tensors_info_h * info, bool extended)
126 : {
127 : ml_tensors_info_s *tensors_info;
128 :
- 129 21010 : check_feature_state (ML_FEATURE);
+ 129 20420 : check_feature_state (ML_FEATURE);
130 :
- 131 21010 : if (!info)
+ 131 20420 : if (!info)
132 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
133 : "The parameter, info, is NULL. Provide a valid pointer.");
134 :
- 135 21008 : *info = tensors_info = g_new0 (ml_tensors_info_s, 1);
- 136 21008 : if (tensors_info == NULL)
+ 135 20418 : *info = tensors_info = g_new0 (ml_tensors_info_s, 1);
+ 136 20418 : if (tensors_info == NULL)
137 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
138 : "Failed to allocate the tensors info handle. Out of memory?");
139 :
- 140 21008 : g_mutex_init (&tensors_info->lock);
- 141 21008 : tensors_info->is_extended = extended;
+ 140 20418 : g_mutex_init (&tensors_info->lock);
+ 141 20418 : tensors_info->is_extended = extended;
142 :
143 : /* init tensors info struct */
- 144 21008 : return _ml_tensors_info_initialize (tensors_info);
+ 144 20418 : return _ml_tensors_info_initialize (tensors_info);
145 : }
146 :
147 : /**
148 : * @brief Creates new tensors-info handle and copies tensors information.
149 : */
150 : int
- 151 7120 : _ml_tensors_info_create_from (const ml_tensors_info_h in,
+ 151 6921 : _ml_tensors_info_create_from (const ml_tensors_info_h in,
152 : ml_tensors_info_h * out)
153 : {
154 : ml_tensors_info_s *_info;
155 : int status;
156 :
- 157 7120 : if (!in || !out)
+ 157 6921 : if (!in || !out)
158 0 : return ML_ERROR_INVALID_PARAMETER;
159 :
- 160 7120 : _info = (ml_tensors_info_s *) in;
+ 160 6921 : _info = (ml_tensors_info_s *) in;
161 :
- 162 7120 : if (_info->is_extended)
+ 162 6921 : if (_info->is_extended)
163 7 : status = ml_tensors_info_create_extended (out);
164 : else
- 165 7113 : status = ml_tensors_info_create (out);
+ 165 6914 : status = ml_tensors_info_create (out);
166 :
- 167 7120 : if (status == ML_ERROR_NONE)
- 168 7120 : status = ml_tensors_info_clone (*out, in);
+ 167 6921 : if (status == ML_ERROR_NONE)
+ 168 6921 : status = ml_tensors_info_clone (*out, in);
169 :
- 170 7120 : return status;
+ 170 6921 : return status;
171 : }
172 :
173 : /**
174 : * @brief Allocates a tensors information handle with default value.
175 : */
176 : int
- 177 20975 : ml_tensors_info_create (ml_tensors_info_h * info)
+ 177 20385 : ml_tensors_info_create (ml_tensors_info_h * info)
178 : {
- 179 20975 : return _ml_tensors_info_create_internal (info, false);
+ 179 20385 : return _ml_tensors_info_create_internal (info, false);
180 : }
181 :
182 : /**
@@ -254,53 +254,53 @@
192 : * @brief Frees the given handle of a tensors information.
193 : */
194 : int
- 195 20959 : ml_tensors_info_destroy (ml_tensors_info_h info)
+ 195 20371 : ml_tensors_info_destroy (ml_tensors_info_h info)
196 : {
197 : ml_tensors_info_s *tensors_info;
198 :
- 199 20959 : check_feature_state (ML_FEATURE);
+ 199 20371 : check_feature_state (ML_FEATURE);
200 :
- 201 20959 : tensors_info = (ml_tensors_info_s *) info;
+ 201 20371 : tensors_info = (ml_tensors_info_s *) info;
202 :
- 203 20959 : if (!tensors_info)
+ 203 20371 : if (!tensors_info)
204 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
205 : "The parameter, info, is NULL. Provide a valid pointer.");
206 :
- 207 20958 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
- 208 20958 : _ml_tensors_info_free (tensors_info);
- 209 20958 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ 207 20370 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
+ 208 20370 : _ml_tensors_info_free (tensors_info);
+ 209 20370 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
210 :
- 211 20958 : g_mutex_clear (&tensors_info->lock);
- 212 20958 : g_free (tensors_info);
+ 211 20370 : g_mutex_clear (&tensors_info->lock);
+ 212 20370 : g_free (tensors_info);
213 :
- 214 20958 : return ML_ERROR_NONE;
+ 214 20370 : return ML_ERROR_NONE;
215 : }
216 :
217 : /**
218 : * @brief Validates the given tensors info is valid.
219 : */
220 : int
- 221 6944 : ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid)
+ 221 6745 : ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid)
222 : {
223 : ml_tensors_info_s *tensors_info;
224 :
- 225 6944 : check_feature_state (ML_FEATURE);
+ 225 6745 : check_feature_state (ML_FEATURE);
226 :
- 227 6944 : if (!valid)
+ 227 6745 : if (!valid)
228 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
229 : "The data-return parameter, valid, is NULL. It should be a pointer pre-allocated by the caller.");
230 :
- 231 6943 : tensors_info = (ml_tensors_info_s *) info;
+ 231 6744 : tensors_info = (ml_tensors_info_s *) info;
232 :
- 233 6943 : if (!tensors_info)
+ 233 6744 : if (!tensors_info)
234 3 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
235 : "The input parameter, tensors_info, is NULL. It should be a valid ml_tensors_info_h, which is usually created by ml_tensors_info_create().");
236 :
- 237 6940 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
- 238 6940 : *valid = gst_tensors_info_validate (&tensors_info->info);
- 239 6940 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ 237 6741 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
+ 238 6741 : *valid = gst_tensors_info_validate (&tensors_info->info);
+ 239 6741 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
240 :
- 241 6940 : return ML_ERROR_NONE;
+ 241 6741 : return ML_ERROR_NONE;
242 : }
243 :
244 : /**
@@ -711,15 +711,15 @@
649 : * @brief Initializes the tensors information with default value.
650 : */
651 : int
- 652 21009 : _ml_tensors_info_initialize (ml_tensors_info_s * info)
+ 652 20419 : _ml_tensors_info_initialize (ml_tensors_info_s * info)
653 : {
- 654 21009 : if (!info)
+ 654 20419 : if (!info)
655 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
656 : "The parameter, info, is NULL. Provide a valid pointer.");
657 :
- 658 21008 : gst_tensors_info_init (&info->info);
+ 658 20418 : gst_tensors_info_init (&info->info);
659 :
- 660 21008 : return ML_ERROR_NONE;
+ 660 20418 : return ML_ERROR_NONE;
661 : }
662 :
663 : /**
@@ -727,12 +727,12 @@
665 : * @note This does not touch the lock. The caller should lock.
666 : */
667 : void
- 668 20998 : _ml_tensors_info_free (ml_tensors_info_s * info)
+ 668 20410 : _ml_tensors_info_free (ml_tensors_info_s * info)
669 : {
- 670 20998 : if (!info)
+ 670 20410 : if (!info)
671 0 : return;
672 :
- 673 20998 : gst_tensors_info_free (&info->info);
+ 673 20410 : gst_tensors_info_free (&info->info);
674 : }
675 :
676 : /**
@@ -742,21 +742,21 @@
680 : * @return @c 0 on success. Otherwise a negative error value.
681 : */
682 : int
- 683 13765 : _ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data)
+ 683 13376 : _ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data)
684 : {
- 685 13765 : int status = ML_ERROR_NONE;
+ 685 13376 : int status = ML_ERROR_NONE;
686 : ml_tensors_data_s *_data;
687 : guint i;
688 :
- 689 13765 : if (data == NULL)
+ 689 13376 : if (data == NULL)
690 19 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
691 : "The parameter, data, is NULL. It should be a valid ml_tensors_data_h handle, which is usually created by ml_tensors_data_create ().");
692 :
- 693 13746 : _data = (ml_tensors_data_s *) data;
- 694 13746 : G_LOCK_UNLESS_NOLOCK (*_data);
+ 693 13357 : _data = (ml_tensors_data_s *) data;
+ 694 13357 : G_LOCK_UNLESS_NOLOCK (*_data);
695 :
- 696 13746 : if (free_data) {
- 697 464 : if (_data->destroy) {
+ 696 13357 : if (free_data) {
+ 697 466 : if (_data->destroy) {
698 0 : status = _data->destroy (_data, _data->user_data);
699 0 : if (status != ML_ERROR_NONE) {
700 0 : G_UNLOCK_UNLESS_NOLOCK (*_data);
@@ -765,22 +765,22 @@
703 : status);
704 : }
705 : } else {
- 706 119248 : for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
- 707 118784 : if (_data->tensors[i].data) {
- 708 688 : g_free (_data->tensors[i].data);
- 709 688 : _data->tensors[i].data = NULL;
+ 706 119762 : for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
+ 707 119296 : if (_data->tensors[i].data) {
+ 708 690 : g_free (_data->tensors[i].data);
+ 709 690 : _data->tensors[i].data = NULL;
710 : }
711 : }
712 : }
713 : }
714 :
- 715 13746 : if (_data->info)
- 716 13746 : ml_tensors_info_destroy (_data->info);
+ 715 13357 : if (_data->info)
+ 716 13357 : ml_tensors_info_destroy (_data->info);
717 :
- 718 13746 : G_UNLOCK_UNLESS_NOLOCK (*_data);
- 719 13746 : g_mutex_clear (&_data->lock);
- 720 13746 : g_free (_data);
- 721 13746 : return status;
+ 718 13357 : G_UNLOCK_UNLESS_NOLOCK (*_data);
+ 719 13357 : g_mutex_clear (&_data->lock);
+ 720 13357 : g_free (_data);
+ 721 13357 : return status;
722 : }
723 :
724 : /**
@@ -788,15 +788,15 @@
726 : * @note This does not touch the lock
727 : */
728 : int
- 729 483 : ml_tensors_data_destroy (ml_tensors_data_h data)
+ 729 485 : ml_tensors_data_destroy (ml_tensors_data_h data)
730 : {
731 : int ret;
- 732 483 : check_feature_state (ML_FEATURE);
- 733 483 : ret = _ml_tensors_data_destroy_internal (data, TRUE);
- 734 483 : if (ret != ML_ERROR_NONE)
+ 732 485 : check_feature_state (ML_FEATURE);
+ 733 485 : ret = _ml_tensors_data_destroy_internal (data, TRUE);
+ 734 485 : if (ret != ML_ERROR_NONE)
735 19 : _ml_error_report_return_continue (ret,
736 : "Call to _ml_tensors_data_destroy_internal failed with %d", ret);
- 737 464 : return ret;
+ 737 466 : return ret;
738 : }
739 :
740 : /**
@@ -804,56 +804,56 @@
742 : * @note Memory for tensor data buffers is not allocated.
743 : */
744 : int
- 745 13792 : _ml_tensors_data_create_no_alloc (const ml_tensors_info_h info,
+ 745 13401 : _ml_tensors_data_create_no_alloc (const ml_tensors_info_h info,
746 : ml_tensors_data_h * data)
747 : {
748 : ml_tensors_data_s *_data;
749 : ml_tensors_info_s *_info;
750 : guint i;
- 751 13792 : int status = ML_ERROR_NONE;
+ 751 13401 : int status = ML_ERROR_NONE;
752 :
- 753 13792 : check_feature_state (ML_FEATURE);
+ 753 13401 : check_feature_state (ML_FEATURE);
754 :
- 755 13792 : if (data == NULL)
+ 755 13401 : if (data == NULL)
756 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
757 : "The parameter, data, is NULL. It should be a valid ml_tensors_info_h handle that may hold a space for ml_tensors_info_h. E.g., ml_tensors_data_h data; _ml_tensors_data_create_no_alloc (info, &data);.");
758 :
759 : /* init null */
- 760 13791 : *data = NULL;
+ 760 13400 : *data = NULL;
761 :
- 762 13791 : _data = g_new0 (ml_tensors_data_s, 1);
- 763 13791 : if (!_data)
+ 762 13400 : _data = g_new0 (ml_tensors_data_s, 1);
+ 763 13400 : if (!_data)
764 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
765 : "Failed to allocate memory for tensors data. Probably the system is out of memory.");
766 :
- 767 13791 : g_mutex_init (&_data->lock);
+ 767 13400 : g_mutex_init (&_data->lock);
768 :
- 769 13791 : _info = (ml_tensors_info_s *) info;
- 770 13791 : if (_info != NULL) {
- 771 7111 : status = _ml_tensors_info_create_from (info, &_data->info);
- 772 7111 : if (status != ML_ERROR_NONE) {
+ 769 13400 : _info = (ml_tensors_info_s *) info;
+ 770 13400 : if (_info != NULL) {
+ 771 6912 : status = _ml_tensors_info_create_from (info, &_data->info);
+ 772 6912 : if (status != ML_ERROR_NONE) {
773 0 : _ml_error_report_continue
774 : ("Failed to create internal information handle for tensors data.");
775 0 : goto error;
776 : }
777 :
- 778 7111 : G_LOCK_UNLESS_NOLOCK (*_info);
- 779 7111 : _data->num_tensors = _info->info.num_tensors;
- 780 14662 : for (i = 0; i < _data->num_tensors; i++) {
- 781 7551 : _data->tensors[i].size = gst_tensors_info_get_size (&_info->info, i);
- 782 7551 : _data->tensors[i].data = NULL;
+ 778 6912 : G_LOCK_UNLESS_NOLOCK (*_info);
+ 779 6912 : _data->num_tensors = _info->info.num_tensors;
+ 780 14264 : for (i = 0; i < _data->num_tensors; i++) {
+ 781 7352 : _data->tensors[i].size = gst_tensors_info_get_size (&_info->info, i);
+ 782 7352 : _data->tensors[i].data = NULL;
783 : }
- 784 7111 : G_UNLOCK_UNLESS_NOLOCK (*_info);
+ 784 6912 : G_UNLOCK_UNLESS_NOLOCK (*_info);
785 : }
786 :
- 787 6680 : error:
- 788 13791 : if (status == ML_ERROR_NONE) {
- 789 13791 : *data = _data;
+ 787 6488 : error:
+ 788 13400 : if (status == ML_ERROR_NONE) {
+ 789 13400 : *data = _data;
790 : } else {
791 0 : _ml_tensors_data_destroy_internal (_data, FALSE);
792 : }
793 :
- 794 13791 : return status;
+ 794 13400 : return status;
795 : }
796 :
797 : /**
@@ -973,48 +973,48 @@
911 : * @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
912 : */
913 : int
- 914 6850 : ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h * data)
+ 914 6651 : ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h * data)
915 : {
- 916 6850 : gint status = ML_ERROR_STREAMS_PIPE;
- 917 6850 : ml_tensors_data_s *_data = NULL;
+ 916 6651 : gint status = ML_ERROR_STREAMS_PIPE;
+ 917 6651 : ml_tensors_data_s *_data = NULL;
918 : guint i;
919 : bool valid;
920 :
- 921 13700 : check_feature_state (ML_FEATURE);
+ 921 13302 : check_feature_state (ML_FEATURE);
922 :
- 923 6850 : if (info == NULL)
+ 923 6651 : if (info == NULL)
924 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
925 : "The parameter, info, is NULL. It should be a valid pointer of ml_tensors_info_h, which is usually created by ml_tensors_info_create().");
- 926 6849 : if (data == NULL)
+ 926 6650 : if (data == NULL)
927 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
928 : "The parameter, data, is NULL. It should be a valid space to hold a ml_tensors_data_h handle. E.g., ml_tensors_data_h data; ml_tensors_data_create (info, &data);.");
929 :
- 930 6848 : status = ml_tensors_info_validate (info, &valid);
- 931 6848 : if (status != ML_ERROR_NONE)
+ 930 6649 : status = ml_tensors_info_validate (info, &valid);
+ 931 6649 : if (status != ML_ERROR_NONE)
932 0 : _ml_error_report_return_continue (status,
933 : "ml_tensors_info_validate() has reported that the parameter, info, is not NULL, but its contents are not valid. The user must provide a valid tensor information with it.");
- 934 6848 : if (!valid)
+ 934 6649 : if (!valid)
935 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
936 : "The parameter, info, is not NULL, but its contents are not valid. The user must provide a valid tensor information with it. Probably, there is an entry that is not allocated or dimension/type information not available. The given info should have valid number of tensors, entries of every tensor along with its type and dimension info.");
937 :
938 : status =
- 939 6847 : _ml_tensors_data_create_no_alloc (info, (ml_tensors_data_h *) & _data);
+ 939 6648 : _ml_tensors_data_create_no_alloc (info, (ml_tensors_data_h *) & _data);
940 :
- 941 6847 : if (status != ML_ERROR_NONE) {
+ 941 6648 : if (status != ML_ERROR_NONE) {
942 0 : _ml_error_report_return_continue (status,
943 : "Failed to allocate tensor data based on the given info with the call to _ml_tensors_data_create_no_alloc (): %d. Check if it's out-of-memory.",
944 : status);
945 : }
946 :
- 947 13916 : for (i = 0; i < _data->num_tensors; i++) {
- 948 7069 : _data->tensors[i].data = g_malloc0 (_data->tensors[i].size);
- 949 7069 : if (_data->tensors[i].data == NULL) {
+ 947 13518 : for (i = 0; i < _data->num_tensors; i++) {
+ 948 6870 : _data->tensors[i].data = g_malloc0 (_data->tensors[i].size);
+ 949 6870 : if (_data->tensors[i].data == NULL) {
950 0 : goto failed_oom;
951 : }
952 : }
953 :
- 954 6847 : *data = _data;
- 955 6847 : return ML_ERROR_NONE;
+ 954 6648 : *data = _data;
+ 955 6648 : return ML_ERROR_NONE;
956 :
957 0 : failed_oom:
958 0 : _ml_tensors_data_destroy_internal (_data, TRUE);
@@ -1115,29 +1115,29 @@
1053 : * @brief Copies tensor meta info.
1054 : */
1055 : int
- 1056 7133 : ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src)
+ 1056 6934 : ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src)
1057 : {
1058 : ml_tensors_info_s *dest_info, *src_info;
- 1059 7133 : int status = ML_ERROR_NONE;
+ 1059 6934 : int status = ML_ERROR_NONE;
1060 :
- 1061 7133 : check_feature_state (ML_FEATURE);
+ 1061 6934 : check_feature_state (ML_FEATURE);
1062 :
- 1063 7133 : dest_info = (ml_tensors_info_s *) dest;
- 1064 7133 : src_info = (ml_tensors_info_s *) src;
+ 1063 6934 : dest_info = (ml_tensors_info_s *) dest;
+ 1064 6934 : src_info = (ml_tensors_info_s *) src;
1065 :
- 1066 7133 : if (!dest_info)
+ 1066 6934 : if (!dest_info)
1067 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1068 : "The parameter, dest, is NULL. It should be an allocated handle (ml_tensors_info_h), usually allocated by ml_tensors_info_create ().");
- 1069 7132 : if (!src_info)
+ 1069 6933 : if (!src_info)
1070 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1071 : "The parameter, src, is NULL. It should be a handle (ml_tensors_info_h) with valid data.");
1072 :
- 1073 7131 : G_LOCK_UNLESS_NOLOCK (*dest_info);
- 1074 7131 : G_LOCK_UNLESS_NOLOCK (*src_info);
+ 1073 6932 : G_LOCK_UNLESS_NOLOCK (*dest_info);
+ 1074 6932 : G_LOCK_UNLESS_NOLOCK (*src_info);
1075 :
- 1076 7131 : if (gst_tensors_info_validate (&src_info->info)) {
- 1077 7131 : dest_info->is_extended = src_info->is_extended;
- 1078 7131 : gst_tensors_info_copy (&dest_info->info, &src_info->info);
+ 1076 6932 : if (gst_tensors_info_validate (&src_info->info)) {
+ 1077 6932 : dest_info->is_extended = src_info->is_extended;
+ 1078 6932 : gst_tensors_info_copy (&dest_info->info, &src_info->info);
1079 : } else {
1080 0 : _ml_error_report
1081 : ("The parameter, src, is a ml_tensors_info_h handle without valid data. Every tensor-info of tensors-info should have a valid type and dimension information and the number of tensors should be between 1 and %d.",
@@ -1145,10 +1145,10 @@
1083 0 : status = ML_ERROR_INVALID_PARAMETER;
1084 : }
1085 :
- 1086 7131 : G_UNLOCK_UNLESS_NOLOCK (*src_info);
- 1087 7131 : G_UNLOCK_UNLESS_NOLOCK (*dest_info);
+ 1086 6932 : G_UNLOCK_UNLESS_NOLOCK (*src_info);
+ 1087 6932 : G_UNLOCK_UNLESS_NOLOCK (*dest_info);
1088 :
- 1089 7131 : return status;
+ 1089 6932 : return status;
1090 : }
1091 :
1092 : /**
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html
index 7b5122bb..598cd39c 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
@@ -65,28 +65,28 @@
_ml_tensors_info_copy_from_ml |
- 6846 |
+ 6647 |
_ml_tensors_info_create_from_gst |
- 13711 |
+ 13320 |
_ml_tensors_info_copy_from_gst |
- 13759 |
+ 13368 |
gst_info_is_extended |
- 27466 |
+ 26684 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html
index c147053c..31dc6182 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
@@ -65,28 +65,28 @@
_ml_tensors_info_copy_from_gst |
- 13759 |
+ 13368 |
_ml_tensors_info_copy_from_ml |
- 6846 |
+ 6647 |
_ml_tensors_info_create_from_gst |
- 13711 |
+ 13320 |
gst_info_is_extended |
- 27466 |
+ 26684 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html
index 109bdd4a..aafbd464 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c
@@ -83,54 +83,54 @@
21 : * @brief Check tensor-info has extended rank value.
22 : */
23 : static gboolean
- 24 27466 : gst_info_is_extended (const GstTensorsInfo * gst_info)
+ 24 26684 : gst_info_is_extended (const GstTensorsInfo * gst_info)
25 : {
26 : GstTensorInfo *_info;
27 : guint i;
28 :
- 29 56234 : for (i = 0; i < gst_info->num_tensors; i++) {
- 30 28792 : _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i);
- 31 28792 : if (!_info)
+ 29 54670 : for (i = 0; i < gst_info->num_tensors; i++) {
+ 30 28010 : _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i);
+ 31 28010 : if (!_info)
32 0 : _ml_error_report_return (FALSE,
33 : "The parameter, gst_info, has invalid number of tensors. The max number of tensors is "
34 : NNS_TENSOR_SIZE_LIMIT_STR);
35 :
- 36 28792 : if (_info->dimension[ML_TENSOR_RANK_LIMIT_PREV] > 0)
+ 36 28010 : if (_info->dimension[ML_TENSOR_RANK_LIMIT_PREV] > 0)
37 24 : return TRUE;
38 : }
39 :
- 40 27442 : return FALSE;
+ 40 26660 : return FALSE;
41 : }
42 :
43 : /**
44 : * @brief Allocates a tensors information handle from gst info.
45 : */
46 : int
- 47 13711 : _ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info,
+ 47 13320 : _ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info,
48 : GstTensorsInfo * gst_info)
49 : {
50 : gboolean is_extended;
51 :
- 52 13711 : if (!ml_info)
+ 52 13320 : if (!ml_info)
53 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
54 : "The parameter, ml_info, is NULL. It should be a valid ml_tensors_info_h instance usually created by ml_tensors_info_create(). This could be an internal bug of ML API.");
55 :
- 56 13710 : if (!gst_info)
+ 56 13319 : if (!gst_info)
57 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
58 : "The parameter, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This could be an internal bug of ML API.");
59 :
- 60 13709 : is_extended = gst_info_is_extended (gst_info);
- 61 13709 : if (is_extended)
+ 60 13318 : is_extended = gst_info_is_extended (gst_info);
+ 61 13318 : if (is_extended)
62 7 : _ml_error_report_return_continue_iferr
63 : (ml_tensors_info_create_extended (ml_info),
64 : "The call to ml_tensors_info_create_extended has failed with %d.",
65 : _ERRNO);
66 : else
- 67 13702 : _ml_error_report_return_continue_iferr (ml_tensors_info_create (ml_info),
+ 67 13311 : _ml_error_report_return_continue_iferr (ml_tensors_info_create (ml_info),
68 : "The call to ml_tensors_info_create has failed with %d.", _ERRNO);
69 :
- 70 13709 : _ml_tensors_info_copy_from_gst (*ml_info, gst_info);
- 71 13709 : return ML_ERROR_NONE;
+ 70 13318 : _ml_tensors_info_copy_from_gst (*ml_info, gst_info);
+ 71 13318 : return ML_ERROR_NONE;
72 : }
73 :
74 : /**
@@ -138,26 +138,26 @@
76 : * @bug Thread safety required. Check its internal users first!
77 : */
78 : int
- 79 13759 : _ml_tensors_info_copy_from_gst (ml_tensors_info_h ml_info,
+ 79 13368 : _ml_tensors_info_copy_from_gst (ml_tensors_info_h ml_info,
80 : const GstTensorsInfo * gst_info)
81 : {
82 : ml_tensors_info_s *_info;
83 :
- 84 13759 : if (!ml_info)
+ 84 13368 : if (!ml_info)
85 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
86 : "The parmater, ml_info, is NULL. It should be a valid ml_tensors_info_s instance, usually created by ml_tensors_info_create(). This is probably an internal bug of ML API.");
- 87 13758 : if (!gst_info)
+ 87 13367 : if (!gst_info)
88 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
89 : "The parmater, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This is probably an internal bug of ML API.");
90 :
- 91 13757 : _info = (ml_tensors_info_s *) ml_info;
+ 91 13366 : _info = (ml_tensors_info_s *) ml_info;
92 :
- 93 13757 : G_LOCK_UNLESS_NOLOCK (*_info);
- 94 13757 : _info->is_extended = gst_info_is_extended (gst_info);
- 95 13757 : gst_tensors_info_copy (&_info->info, gst_info);
- 96 13757 : G_UNLOCK_UNLESS_NOLOCK (*_info);
+ 93 13366 : G_LOCK_UNLESS_NOLOCK (*_info);
+ 94 13366 : _info->is_extended = gst_info_is_extended (gst_info);
+ 95 13366 : gst_tensors_info_copy (&_info->info, gst_info);
+ 96 13366 : G_UNLOCK_UNLESS_NOLOCK (*_info);
97 :
- 98 13757 : return ML_ERROR_NONE;
+ 98 13366 : return ML_ERROR_NONE;
99 : }
100 :
101 : /**
@@ -165,25 +165,25 @@
103 : * @bug Thread safety required. Check its internal users first!
104 : */
105 : int
- 106 6846 : _ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info,
+ 106 6647 : _ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info,
107 : const ml_tensors_info_h ml_info)
108 : {
109 : ml_tensors_info_s *_info;
110 :
- 111 6846 : if (!ml_info)
+ 111 6647 : if (!ml_info)
112 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
113 : "The parmater, ml_info, is NULL. It should be a valid ml_tensors_info_s instance, usually created by ml_tensors_info_create(). This is probably an internal bug of ML API.");
- 114 6845 : if (!gst_info)
+ 114 6646 : if (!gst_info)
115 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
116 : "The parmater, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This is probably an internal bug of ML API.");
117 :
- 118 6844 : _info = (ml_tensors_info_s *) ml_info;
+ 118 6645 : _info = (ml_tensors_info_s *) ml_info;
119 :
- 120 6844 : G_LOCK_UNLESS_NOLOCK (*_info);
- 121 6844 : gst_tensors_info_copy (gst_info, &_info->info);
- 122 6844 : G_UNLOCK_UNLESS_NOLOCK (*_info);
+ 120 6645 : G_LOCK_UNLESS_NOLOCK (*_info);
+ 121 6645 : gst_tensors_info_copy (gst_info, &_info->info);
+ 122 6645 : G_UNLOCK_UNLESS_NOLOCK (*_info);
123 :
- 124 6844 : return ML_ERROR_NONE;
+ 124 6645 : return ML_ERROR_NONE;
125 : }
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html
index f3328f81..12172811 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
@@ -597,56 +597,56 @@
cb_bus_sync_message |
- 5410 |
+ 5411 |
_pipe_src_cb_need_data |
- 6572 |
+ 6373 |
get_app_src_callback |
- 6572 |
+ 6373 |
ml_pipeline_src_get_tensors_info |
- 6576 |
+ 6377 |
ml_pipeline_src_input_data |
- 6660 |
+ 6461 |
cb_sink_event |
- 6680 |
+ 6488 |
ml_pipeline_src_parse_tensors_info |
- 13278 |
+ 12880 |
get_tensors_info_from_caps |
- 13306 |
+ 12908 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html
index 6cc1542e..39909bec 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
@@ -100,7 +100,7 @@
_pipe_src_cb_need_data |
- 6572 |
+ 6373 |
@@ -121,14 +121,14 @@
cb_bus_sync_message |
- 5410 |
+ 5411 |
cb_sink_event |
- 6680 |
+ 6488 |
@@ -184,7 +184,7 @@
get_app_src_callback |
- 6572 |
+ 6373 |
@@ -198,7 +198,7 @@
get_tensors_info_from_caps |
- 13306 |
+ 12908 |
@@ -492,21 +492,21 @@
ml_pipeline_src_get_tensors_info |
- 6576 |
+ 6377 |
ml_pipeline_src_input_data |
- 6660 |
+ 6461 |
ml_pipeline_src_parse_tensors_info |
- 13278 |
+ 12880 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html
index 4f1d25af..b16a046d 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c
@@ -315,69 +315,69 @@
253 : * @brief Internal function to get the tensors info from the element caps.
254 : */
255 : static gboolean
- 256 13306 : get_tensors_info_from_caps (GstCaps * caps, GstTensorsInfo * info,
+ 256 12908 : get_tensors_info_from_caps (GstCaps * caps, GstTensorsInfo * info,
257 : gboolean * is_flexible)
258 : {
259 : GstStructure *s;
260 : GstTensorsConfig config;
261 : guint i, n_caps;
- 262 13306 : gboolean found = FALSE;
+ 262 12908 : gboolean found = FALSE;
263 :
- 264 13306 : n_caps = gst_caps_get_size (caps);
+ 264 12908 : n_caps = gst_caps_get_size (caps);
265 :
- 266 13313 : for (i = 0; i < n_caps; i++) {
- 267 13307 : s = gst_caps_get_structure (caps, i);
- 268 13307 : found = gst_tensors_config_from_structure (&config, s);
+ 266 12915 : for (i = 0; i < n_caps; i++) {
+ 267 12909 : s = gst_caps_get_structure (caps, i);
+ 268 12909 : found = gst_tensors_config_from_structure (&config, s);
269 :
- 270 13307 : if (found) {
- 271 13300 : gst_tensors_info_free (info);
- 272 13300 : gst_tensors_info_copy (info, &config.info);
- 273 13300 : *is_flexible = gst_tensors_config_is_flexible (&config);
+ 270 12909 : if (found) {
+ 271 12902 : gst_tensors_info_free (info);
+ 272 12902 : gst_tensors_info_copy (info, &config.info);
+ 273 12902 : *is_flexible = gst_tensors_config_is_flexible (&config);
274 : }
275 :
- 276 13307 : gst_tensors_config_free (&config);
- 277 13307 : if (found)
- 278 13300 : break;
+ 276 12909 : gst_tensors_config_free (&config);
+ 277 12909 : if (found)
+ 278 12902 : break;
279 : }
280 :
- 281 13306 : return found;
+ 281 12908 : return found;
282 : }
283 :
284 : /**
285 : * @brief Handle a sink element for registered ml_pipeline_sink_cb
286 : */
287 : static void
- 288 6680 : cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
+ 288 6488 : cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
289 : {
- 290 6680 : ml_pipeline_element *elem = user_data;
+ 290 6488 : ml_pipeline_element *elem = user_data;
291 :
292 : /** @todo CRITICAL if the pipeline is being killed, don't proceed! */
293 : GstMemory *mem[ML_TENSOR_SIZE_LIMIT];
294 : GstMapInfo map[ML_TENSOR_SIZE_LIMIT];
295 : guint i, num_tensors;
296 : GList *l;
- 297 6680 : ml_tensors_data_s *_data = NULL;
+ 297 6488 : ml_tensors_data_s *_data = NULL;
298 : GstTensorsInfo gst_info;
299 : int status;
300 :
- 301 6680 : gst_tensors_info_init (&gst_info);
- 302 6680 : gst_info.num_tensors = num_tensors = gst_tensor_buffer_get_count (b);
+ 301 6488 : gst_tensors_info_init (&gst_info);
+ 302 6488 : gst_info.num_tensors = num_tensors = gst_tensor_buffer_get_count (b);
303 :
304 : /* Set tensor data. The handle for tensors-info in data should be added. */
305 : status =
- 306 6680 : _ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data);
- 307 6680 : if (status != ML_ERROR_NONE) {
+ 306 6488 : _ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data);
+ 307 6488 : if (status != ML_ERROR_NONE) {
308 0 : _ml_loge (_ml_detail
309 : ("Failed to allocate memory for tensors data in sink callback, which is registered by ml_pipeline_sink_register ()."));
- 310 6680 : return;
+ 310 6488 : return;
311 : }
312 :
- 313 6680 : g_mutex_lock (&elem->lock);
+ 313 6488 : g_mutex_lock (&elem->lock);
314 :
- 315 6680 : _data->num_tensors = num_tensors;
- 316 13837 : for (i = 0; i < num_tensors; i++) {
- 317 7157 : mem[i] = gst_tensor_buffer_get_nth_memory (b, i);
- 318 7157 : if (!gst_memory_map (mem[i], &map[i], GST_MAP_READ)) {
+ 315 6488 : _data->num_tensors = num_tensors;
+ 316 13453 : for (i = 0; i < num_tensors; i++) {
+ 317 6965 : mem[i] = gst_tensor_buffer_get_nth_memory (b, i);
+ 318 6965 : if (!gst_memory_map (mem[i], &map[i], GST_MAP_READ)) {
319 0 : _ml_loge (_ml_detail
320 : ("Failed to map the output in sink '%s' callback, which is registered by ml_pipeline_sink_register ()",
321 : elem->name));
@@ -386,12 +386,12 @@
324 0 : goto error;
325 : }
326 :
- 327 7157 : _data->tensors[i].data = map[i].data;
- 328 7157 : _data->tensors[i].size = map[i].size;
+ 327 6965 : _data->tensors[i].data = map[i].data;
+ 328 6965 : _data->tensors[i].size = map[i].size;
329 : }
330 :
331 : /** @todo This assumes that padcap is static */
- 332 6680 : if (elem->sink == NULL) {
+ 332 6488 : if (elem->sink == NULL) {
333 28 : gboolean found = FALSE;
334 28 : gboolean flexible = FALSE;
335 :
@@ -423,7 +423,7 @@
361 : }
362 :
363 : /* Prepare output and set data. */
- 364 6680 : if (elem->is_flexible_tensor) {
+ 364 6488 : if (elem->is_flexible_tensor) {
365 : GstTensorMetaInfo meta;
366 : gsize hsize;
367 :
@@ -439,10 +439,10 @@
377 9 : _data->tensors[i].size = map[i].size - hsize;
378 : }
379 : } else {
- 380 6677 : gst_tensors_info_copy (&gst_info, &elem->tensors_info);
+ 380 6485 : gst_tensors_info_copy (&gst_info, &elem->tensors_info);
381 :
382 : /* Compare output info and buffer if gst-buffer is not flexible. */
- 383 6677 : if (gst_info.num_tensors != num_tensors) {
+ 383 6485 : if (gst_info.num_tensors != num_tensors) {
384 0 : _ml_loge (_ml_detail
385 : ("The sink event of [%s] cannot be handled because the number of tensors mismatches.",
386 : elem->name));
@@ -452,15 +452,15 @@
390 0 : goto error;
391 : }
392 :
- 393 13825 : for (i = 0; i < num_tensors; i++) {
- 394 7148 : size_t sz = gst_tensors_info_get_size (&gst_info, i);
+ 393 13441 : for (i = 0; i < num_tensors; i++) {
+ 394 6956 : size_t sz = gst_tensors_info_get_size (&gst_info, i);
395 :
396 : /* Not configured, yet. */
- 397 7148 : if (sz == 0)
+ 397 6956 : if (sz == 0)
398 0 : _ml_loge (_ml_detail
399 : ("The caps for sink(%s) is not configured.", elem->name));
400 :
- 401 7148 : if (sz != map[i].size) {
+ 401 6956 : if (sz != map[i].size) {
402 0 : _ml_loge (_ml_detail
403 : ("The sink event of [%s] cannot be handled because the tensor dimension mismatches.",
404 : elem->name));
@@ -473,35 +473,35 @@
411 : }
412 :
413 : /* Create new output info, data handle should be updated here. */
- 414 6680 : _ml_tensors_info_create_from_gst (&_data->info, &gst_info);
+ 414 6488 : _ml_tensors_info_create_from_gst (&_data->info, &gst_info);
415 :
416 : /* Iterate e->handles, pass the data to them */
- 417 13366 : for (l = elem->handles; l != NULL; l = l->next) {
+ 417 12982 : for (l = elem->handles; l != NULL; l = l->next) {
418 : ml_pipeline_sink_cb callback;
- 419 6686 : ml_pipeline_common_elem *sink = l->data;
- 420 6686 : if (sink->callback_info == NULL)
+ 419 6494 : ml_pipeline_common_elem *sink = l->data;
+ 420 6494 : if (sink->callback_info == NULL)
421 3 : continue;
422 :
- 423 6683 : callback = sink->callback_info->sink_cb;
- 424 6683 : if (callback)
- 425 6683 : callback (_data, _data->info, sink->callback_info->sink_pdata);
+ 423 6491 : callback = sink->callback_info->sink_cb;
+ 424 6491 : if (callback)
+ 425 6491 : callback (_data, _data->info, sink->callback_info->sink_pdata);
426 :
427 : /** @todo Measure time. Warn if it takes long. Kill if it takes too long. */
428 : }
429 :
- 430 6680 : error:
- 431 6680 : g_mutex_unlock (&elem->lock);
+ 430 6488 : error:
+ 431 6488 : g_mutex_unlock (&elem->lock);
432 :
- 433 13837 : for (i = 0; i < num_tensors; i++) {
- 434 7157 : gst_memory_unmap (mem[i], &map[i]);
- 435 7157 : gst_memory_unref (mem[i]);
+ 433 13453 : for (i = 0; i < num_tensors; i++) {
+ 434 6965 : gst_memory_unmap (mem[i], &map[i]);
+ 435 6965 : gst_memory_unref (mem[i]);
436 : }
437 :
- 438 6680 : _ml_tensors_data_destroy_internal (_data, FALSE);
- 439 6680 : _data = NULL;
+ 438 6488 : _ml_tensors_data_destroy_internal (_data, FALSE);
+ 439 6488 : _data = NULL;
440 :
- 441 6680 : gst_tensors_info_free (&gst_info);
- 442 6680 : return;
+ 441 6488 : gst_tensors_info_free (&gst_info);
+ 442 6488 : return;
443 : }
444 :
445 : /**
@@ -527,16 +527,16 @@
465 : * @brief Callback for bus message.
466 : */
467 : static void
- 468 5410 : cb_bus_sync_message (GstBus * bus, GstMessage * message, gpointer user_data)
+ 468 5411 : cb_bus_sync_message (GstBus * bus, GstMessage * message, gpointer user_data)
469 : {
470 : ml_pipeline *pipe_h;
471 :
- 472 5410 : pipe_h = (ml_pipeline *) user_data;
+ 472 5411 : pipe_h = (ml_pipeline *) user_data;
473 :
- 474 5410 : if (pipe_h == NULL)
+ 474 5411 : if (pipe_h == NULL)
475 0 : return;
476 :
- 477 5410 : switch (GST_MESSAGE_TYPE (message)) {
+ 477 5411 : switch (GST_MESSAGE_TYPE (message)) {
478 7 : case GST_MESSAGE_EOS:
479 7 : pipe_h->isEOS = TRUE;
480 7 : break;
@@ -556,8 +556,8 @@
494 : }
495 : }
496 4390 : break;
- 497 1013 : default:
- 498 1013 : break;
+ 497 1014 : default:
+ 498 1014 : break;
499 : }
500 : }
501 :
@@ -1559,29 +1559,29 @@
1497 : * @brief Parse tensors info of src element.
1498 : */
1499 : static int
- 1500 13278 : ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem)
+ 1500 12880 : ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem)
1501 : {
- 1502 13278 : GstCaps *caps = NULL;
- 1503 13278 : gboolean found = FALSE, flexible = FALSE;
+ 1502 12880 : GstCaps *caps = NULL;
+ 1503 12880 : gboolean found = FALSE, flexible = FALSE;
1504 :
- 1505 13278 : if (elem->src == NULL) {
+ 1505 12880 : if (elem->src == NULL) {
1506 41 : elem->src = gst_element_get_static_pad (elem->element, "src");
1507 : }
1508 :
- 1509 13278 : if (elem->src == NULL) {
+ 1509 12880 : if (elem->src == NULL) {
1510 0 : _ml_error_report
1511 : ("Failed to get the src pad of the element[%s]. The designated source element does not have available src pad? For the detail, please check the GStreamer log messages.",
1512 : elem->name);
- 1513 13278 : return ML_ERROR_STREAMS_PIPE;
+ 1513 12880 : return ML_ERROR_STREAMS_PIPE;
1514 : }
1515 :
1516 : /* If caps is given, use it. e.g. Use cap "image/png" when the pipeline is */
1517 : /* given as "appsrc caps=image/png ! pngdec ! ... " */
- 1518 13278 : caps = gst_pad_get_current_caps (elem->src);
- 1519 13278 : if (!caps)
- 1520 13214 : caps = gst_pad_get_allowed_caps (elem->src);
+ 1518 12880 : caps = gst_pad_get_current_caps (elem->src);
+ 1519 12880 : if (!caps)
+ 1520 12816 : caps = gst_pad_get_allowed_caps (elem->src);
1521 :
- 1522 13278 : if (!caps) {
+ 1522 12880 : if (!caps) {
1523 0 : _ml_logw
1524 : ("Cannot find caps. The pipeline is not yet negotiated for src element [%s].",
1525 : elem->name);
@@ -1590,10 +1590,10 @@
1528 0 : return ML_ERROR_TRY_AGAIN;
1529 : }
1530 :
- 1531 13278 : found = get_tensors_info_from_caps (caps, &elem->tensors_info, &flexible);
+ 1531 12880 : found = get_tensors_info_from_caps (caps, &elem->tensors_info, &flexible);
1532 :
- 1533 13278 : if (found) {
- 1534 13272 : elem->is_flexible_tensor = flexible;
+ 1533 12880 : if (found) {
+ 1534 12874 : elem->is_flexible_tensor = flexible;
1535 : } else {
1536 6 : if (gst_caps_is_fixed (caps)) {
1537 5 : GstStructure *st = gst_caps_get_structure (caps, 0);
@@ -1601,8 +1601,8 @@
1539 : }
1540 : }
1541 :
- 1542 13278 : gst_caps_unref (caps);
- 1543 13278 : return ML_ERROR_NONE;
+ 1542 12880 : gst_caps_unref (caps);
+ 1543 12880 : return ML_ERROR_NONE;
1544 : }
1545 :
1546 : /**
@@ -1698,7 +1698,7 @@
1636 : * @brief Push a data frame to a src (more info in nnstreamer.h)
1637 : */
1638 : int
- 1639 6660 : ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data,
+ 1639 6461 : ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data,
1640 : ml_pipeline_buf_policy_e policy)
1641 : {
1642 : GstBuffer *buffer;
@@ -1710,18 +1710,18 @@
1648 : ml_tensors_data_s *_data;
1649 : unsigned int i;
1650 :
- 1651 13320 : handle_init (src, h);
+ 1651 12922 : handle_init (src, h);
1652 :
- 1653 6660 : _data = (ml_tensors_data_s *) data;
- 1654 6660 : if (!_data) {
+ 1653 6461 : _data = (ml_tensors_data_s *) data;
+ 1654 6461 : if (!_data) {
1655 1 : _ml_error_report
1656 : ("The given parameter, data (ml_tensors_data_h), is NULL. It should be a valid ml_tensor_data_h instance, which is usually created by ml_tensors_data_create().");
1657 1 : ret = ML_ERROR_INVALID_PARAMETER;
1658 1 : goto unlock_return;
1659 : }
- 1660 6659 : G_LOCK_UNLESS_NOLOCK (*_data);
+ 1660 6460 : G_LOCK_UNLESS_NOLOCK (*_data);
1661 :
- 1662 6659 : if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
+ 1662 6460 : if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
1663 0 : _ml_error_report
1664 : ("The number of tensors of the given data (ml_tensors_data_h) is invalid. The number of tensors of data is %u. It should be between 1 and %u.",
1665 : _data->num_tensors, ML_TENSOR_SIZE_LIMIT);
@@ -1729,9 +1729,9 @@
1667 0 : goto dont_destroy_data;
1668 : }
1669 :
- 1670 6659 : ret = ml_pipeline_src_parse_tensors_info (elem);
+ 1670 6460 : ret = ml_pipeline_src_parse_tensors_info (elem);
1671 :
- 1672 6659 : if (ret != ML_ERROR_NONE) {
+ 1672 6460 : if (ret != ML_ERROR_NONE) {
1673 0 : if (ret == ML_ERROR_TRY_AGAIN)
1674 0 : _ml_error_report_continue
1675 : ("The pipeline is not ready to accept input streams. The input is ignored.");
@@ -1741,8 +1741,8 @@
1679 0 : goto dont_destroy_data;
1680 : }
1681 :
- 1682 6659 : if (!elem->is_media_stream && !elem->is_flexible_tensor) {
- 1683 6652 : if (elem->tensors_info.num_tensors != _data->num_tensors) {
+ 1682 6460 : if (!elem->is_media_stream && !elem->is_flexible_tensor) {
+ 1683 6453 : if (elem->tensors_info.num_tensors != _data->num_tensors) {
1684 0 : _ml_error_report
1685 : ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u",
1686 : elem->name, elem->tensors_info.num_tensors, _data->num_tensors);
@@ -1751,10 +1751,10 @@
1689 0 : goto dont_destroy_data;
1690 : }
1691 :
- 1692 13457 : for (i = 0; i < _data->num_tensors; i++) {
- 1693 6807 : size_t sz = gst_tensors_info_get_size (&elem->tensors_info, i);
+ 1692 13059 : for (i = 0; i < _data->num_tensors; i++) {
+ 1693 6608 : size_t sz = gst_tensors_info_get_size (&elem->tensors_info, i);
1694 :
- 1695 6807 : if (sz != _data->tensors[i].size) {
+ 1695 6608 : if (sz != _data->tensors[i].size) {
1696 2 : _ml_error_report
1697 : ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)",
1698 : i, _data->tensors[i].size, sz);
@@ -1766,21 +1766,21 @@
1704 : }
1705 :
1706 : /* Create buffer to be pushed from buf[] */
- 1707 6657 : buffer = gst_buffer_new ();
- 1708 6657 : _ml_tensors_info_copy_from_ml (&gst_info, _data->info);
+ 1707 6458 : buffer = gst_buffer_new ();
+ 1708 6458 : _ml_tensors_info_copy_from_ml (&gst_info, _data->info);
1709 :
- 1710 13475 : for (i = 0; i < _data->num_tensors; i++) {
+ 1710 13077 : for (i = 0; i < _data->num_tensors; i++) {
1711 : GstTensorInfo *_gst_tensor_info =
- 1712 6818 : gst_tensors_info_get_nth_info (&gst_info, i);
- 1713 6818 : mem_data = _data->tensors[i].data;
- 1714 6818 : mem_size = _data->tensors[i].size;
+ 1712 6619 : gst_tensors_info_get_nth_info (&gst_info, i);
+ 1713 6619 : mem_data = _data->tensors[i].data;
+ 1714 6619 : mem_size = _data->tensors[i].size;
1715 :
- 1716 6818 : mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
+ 1716 6619 : mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
1717 : mem_data, mem_size, 0, mem_size, mem_data,
1718 : (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) ? g_free : NULL);
1719 :
1720 : /* flex tensor, append header. */
- 1721 6818 : if (elem->is_flexible_tensor) {
+ 1721 6619 : if (elem->is_flexible_tensor) {
1722 : GstTensorMetaInfo meta;
1723 :
1724 9 : gst_tensor_info_convert_to_meta (_gst_tensor_info, &meta);
@@ -1789,76 +1789,76 @@
1727 9 : gst_memory_unref (tmp);
1728 : }
1729 :
- 1730 6818 : gst_tensor_buffer_append_memory (buffer, mem, _gst_tensor_info);
+ 1730 6619 : gst_tensor_buffer_append_memory (buffer, mem, _gst_tensor_info);
1731 : /** @todo Verify that gst_buffer_append lists tensors/gstmem in the correct order */
1732 : }
1733 :
- 1734 6657 : gst_tensors_info_free (&gst_info);
+ 1734 6458 : gst_tensors_info_free (&gst_info);
1735 :
1736 : /* Unlock if it's not auto-free. We do not know when it'll be freed. */
- 1737 6657 : if (policy != ML_PIPELINE_BUF_POLICY_AUTO_FREE)
+ 1737 6458 : if (policy != ML_PIPELINE_BUF_POLICY_AUTO_FREE)
1738 55 : G_UNLOCK_UNLESS_NOLOCK (*_data);
1739 :
1740 : /* Push the data! */
- 1741 6657 : gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer);
+ 1741 6458 : gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer);
1742 :
1743 : /* Free data ptr if buffer policy is auto-free */
- 1744 6657 : if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) {
- 1745 6602 : G_UNLOCK_UNLESS_NOLOCK (*_data);
- 1746 6602 : _ml_tensors_data_destroy_internal (_data, FALSE);
- 1747 6602 : _data = NULL;
+ 1744 6458 : if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) {
+ 1745 6403 : G_UNLOCK_UNLESS_NOLOCK (*_data);
+ 1746 6403 : _ml_tensors_data_destroy_internal (_data, FALSE);
+ 1747 6403 : _data = NULL;
1748 : }
1749 :
- 1750 6657 : if (gret == GST_FLOW_FLUSHING) {
+ 1750 6458 : if (gret == GST_FLOW_FLUSHING) {
1751 0 : _ml_logw
1752 : ("The pipeline is not in PAUSED/PLAYING. The input may be ignored.");
1753 0 : ret = ML_ERROR_TRY_AGAIN;
- 1754 6657 : } else if (gret == GST_FLOW_EOS) {
+ 1754 6458 : } else if (gret == GST_FLOW_EOS) {
1755 0 : _ml_logw ("THe pipeline is in EOS state. The input is ignored.");
1756 0 : ret = ML_ERROR_STREAMS_PIPE;
1757 : }
1758 :
- 1759 6657 : goto unlock_return;
+ 1759 6458 : goto unlock_return;
1760 :
1761 2 : dont_destroy_data:
1762 2 : G_UNLOCK_UNLESS_NOLOCK (*_data);
1763 :
- 1764 6660 : handle_exit (h);
+ 1764 6461 : handle_exit (h);
1765 : }
1766 :
1767 : /**
1768 : * @brief Internal function to fetch ml_pipeline_src_callbacks_s pointer
1769 : */
1770 : static ml_pipeline_src_callbacks_s *
- 1771 6572 : get_app_src_callback (ml_pipeline_common_elem * src_h, void **data)
+ 1771 6373 : get_app_src_callback (ml_pipeline_common_elem * src_h, void **data)
1772 : {
- 1773 6572 : ml_pipeline_src_callbacks_s *src_cb = NULL;
+ 1773 6373 : ml_pipeline_src_callbacks_s *src_cb = NULL;
1774 :
- 1775 6572 : if (src_h->callback_info) {
- 1776 6572 : src_cb = &src_h->callback_info->src_cb;
- 1777 6572 : *data = src_h->callback_info->src_pdata;
+ 1775 6373 : if (src_h->callback_info) {
+ 1776 6373 : src_cb = &src_h->callback_info->src_cb;
+ 1777 6373 : *data = src_h->callback_info->src_pdata;
1778 : }
1779 :
- 1780 6572 : return src_cb;
+ 1780 6373 : return src_cb;
1781 : }
1782 :
1783 : /**
1784 : * @brief Internal function for appsrc callback - need_data.
1785 : */
1786 : static void
- 1787 6572 : _pipe_src_cb_need_data (GstAppSrc * src, guint length, gpointer user_data)
+ 1787 6373 : _pipe_src_cb_need_data (GstAppSrc * src, guint length, gpointer user_data)
1788 : {
1789 : ml_pipeline_common_elem *src_h;
- 1790 6572 : ml_pipeline_src_callbacks_s *src_cb = NULL;
- 1791 6572 : void *pdata = NULL;
+ 1790 6373 : ml_pipeline_src_callbacks_s *src_cb = NULL;
+ 1791 6373 : void *pdata = NULL;
1792 :
- 1793 6572 : src_h = (ml_pipeline_common_elem *) user_data;
- 1794 6572 : if (!src_h)
+ 1793 6373 : src_h = (ml_pipeline_common_elem *) user_data;
+ 1794 6373 : if (!src_h)
1795 0 : return;
1796 :
- 1797 6572 : src_cb = get_app_src_callback (src_h, &pdata);
- 1798 6572 : if (src_cb && src_cb->need_data)
- 1799 6572 : src_cb->need_data (src_h, length, pdata);
+ 1797 6373 : src_cb = get_app_src_callback (src_h, &pdata);
+ 1798 6373 : if (src_cb && src_cb->need_data)
+ 1799 6373 : src_cb->need_data (src_h, length, pdata);
1800 : }
1801 :
1802 : /**
@@ -1944,27 +1944,27 @@
1882 : * @brief Gets a handle for the tensors metadata of given src node.
1883 : */
1884 : int
- 1885 6576 : ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h * info)
+ 1885 6377 : ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h * info)
1886 : {
- 1887 6576 : handle_init (src, h);
+ 1887 6377 : handle_init (src, h);
1888 :
- 1889 6576 : if (info == NULL) {
+ 1889 6377 : if (info == NULL) {
1890 0 : _ml_error_report
1891 : ("The parameter, info (ml_tensors_info_h *), is NULL. It should be a valid pointer to a ml_tensors_info_h instance, which is usually created by ml_tensors_info_create().");
1892 0 : ret = ML_ERROR_INVALID_PARAMETER;
1893 0 : goto unlock_return;
1894 : }
1895 :
- 1896 6576 : ret = ml_pipeline_src_parse_tensors_info (elem);
+ 1896 6377 : ret = ml_pipeline_src_parse_tensors_info (elem);
1897 :
- 1898 6576 : if (ret == ML_ERROR_NONE) {
- 1899 6576 : ret = _ml_tensors_info_create_from_gst (info, &elem->tensors_info);
+ 1898 6377 : if (ret == ML_ERROR_NONE) {
+ 1899 6377 : ret = _ml_tensors_info_create_from_gst (info, &elem->tensors_info);
1900 : } else {
1901 0 : _ml_error_report_continue
1902 : ("ml_pipeline_src_parse_tensors_info () has returned error; it cannot fetch input tensor info (metadata of input stream) for the given ml_pipeline_src_h handle (h). ml_pipeline_src_get_tensors_info () cannot continue.");
1903 : }
1904 :
- 1905 6576 : handle_exit (h);
+ 1905 6377 : handle_exit (h);
1906 : }
1907 :
1908 : /****************************************************
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html
index 7e372a69..681bbf10 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
@@ -63,23 +63,23 @@
- ml_single_destroy_notify_cb |
+ __destroy_notify |
0 |
- ml_single_set_inout_tensors_info |
+ ml_single_destroy_notify_cb |
0 |
- __destroy_notify |
+ ml_single_set_inout_tensors_info |
- 1 |
+ 0 |
@@ -226,7 +226,7 @@
__invoke |
- 87 |
+ 88 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html
index 47e96d0f..17d2948b 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
@@ -65,14 +65,14 @@
__destroy_notify |
- 1 |
+ 0 |
__invoke |
- 87 |
+ 88 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html
index 57bcb28c..2ab7143f 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c
@@ -397,29 +397,29 @@
335 : * @brief To call the framework to destroy the allocated output data
336 : */
337 : static inline void
- 338 1 : __destroy_notify (gpointer data_h, gpointer single_data)
+ 338 0 : __destroy_notify (gpointer data_h, gpointer single_data)
339 : {
340 : ml_single *single_h;
341 : ml_tensors_data_s *data;
342 :
- 343 1 : data = (ml_tensors_data_s *) data_h;
- 344 1 : single_h = (ml_single *) single_data;
+ 343 0 : data = (ml_tensors_data_s *) data_h;
+ 344 0 : single_h = (ml_single *) single_data;
345 :
- 346 1 : if (G_LIKELY (single_h->filter)) {
- 347 1 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
- 348 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors);
+ 346 0 : if (G_LIKELY (single_h->filter)) {
+ 347 0 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
+ 348 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors);
349 : }
350 : }
351 :
352 : /* reset callback function */
- 353 1 : data->destroy = NULL;
- 354 1 : }
+ 353 0 : data->destroy = NULL;
+ 354 0 : }
355 :
356 : /**
357 : * @brief Wrapper function for __destroy_notify
358 : */
359 : static int
- 360 0 : ml_single_destroy_notify_cb (void *handle, void *user_data)
+ 360 0 : ml_single_destroy_notify_cb (void *handle, void *user_data)
361 : {
362 0 : ml_tensors_data_h data = (ml_tensors_data_h) handle;
363 0 : ml_single_h single = (ml_single_h) user_data;
@@ -478,24 +478,24 @@
416 : * @brief Internal function to call subplugin's invoke
417 : */
418 : static inline int
- 419 87 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out,
+ 419 88 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out,
420 : gboolean alloc_output)
421 : {
422 : ml_tensors_data_s *in_data, *out_data;
- 423 87 : int status = ML_ERROR_NONE;
+ 423 88 : int status = ML_ERROR_NONE;
424 :
- 425 87 : in_data = (ml_tensors_data_s *) in;
- 426 87 : out_data = (ml_tensors_data_s *) out;
+ 425 88 : in_data = (ml_tensors_data_s *) in;
+ 426 88 : out_data = (ml_tensors_data_s *) out;
427 :
428 : /* Prevent error case when input or output is null in invoke thread. */
- 429 87 : if (!in_data || !out_data) {
+ 429 88 : if (!in_data || !out_data) {
430 0 : _ml_error_report ("Failed to invoke a model, invalid data handle.");
431 0 : return ML_ERROR_STREAMS_PIPE;
432 : }
433 :
434 : /* Invoke the thread. */
- 435 87 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
- 436 87 : out_data->tensors, alloc_output)) {
+ 435 88 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
+ 436 88 : out_data->tensors, alloc_output)) {
437 0 : const char *fw_name = _ml_get_nnfw_subplugin_name (single_h->nnfw);
438 0 : _ml_error_report
439 : ("Failed to invoke the tensors. The invoke callback of the tensor-filter subplugin '%s' has failed. Please contact the author of tensor-filter-%s (nnstreamer-%s) or review its source code. Note that this usually happens when the designated framework does not support the given model (e.g., trying to run tf-lite 2.6 model with tf-lite 1.13).",
@@ -503,7 +503,7 @@
441 0 : status = ML_ERROR_STREAMS_PIPE;
442 : }
443 :
- 444 87 : return status;
+ 444 88 : return status;
445 : }
446 :
447 : /**
@@ -563,35 +563,35 @@
501 102 : int status = ML_ERROR_NONE;
502 :
503 : /** wait for data */
- 504 125 : while (single_h->state != RUNNING) {
+ 504 126 : while (single_h->state != RUNNING) {
505 102 : g_cond_wait (&single_h->cond, &single_h->mutex);
506 100 : if (single_h->state == JOIN_REQUESTED)
- 507 77 : goto exit;
+ 507 76 : goto exit;
508 : }
509 :
- 510 23 : input = single_h->input;
- 511 23 : output = single_h->output;
+ 510 24 : input = single_h->input;
+ 511 24 : output = single_h->output;
512 : /* Set null to prevent double-free. */
- 513 23 : single_h->input = single_h->output = NULL;
+ 513 24 : single_h->input = single_h->output = NULL;
514 :
- 515 23 : single_h->invoking = TRUE;
- 516 23 : alloc_output = single_h->free_output;
- 517 23 : g_mutex_unlock (&single_h->mutex);
- 518 23 : status = __invoke (single_h, input, output, alloc_output);
- 519 23 : g_mutex_lock (&single_h->mutex);
+ 515 24 : single_h->invoking = TRUE;
+ 516 24 : alloc_output = single_h->free_output;
+ 517 24 : g_mutex_unlock (&single_h->mutex);
+ 518 24 : status = __invoke (single_h, input, output, alloc_output);
+ 519 24 : g_mutex_lock (&single_h->mutex);
520 : /* Clear input data after invoke is done. */
- 521 23 : ml_tensors_data_destroy (input);
- 522 23 : single_h->invoking = FALSE;
+ 521 24 : ml_tensors_data_destroy (input);
+ 522 24 : single_h->invoking = FALSE;
523 :
- 524 23 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) {
- 525 3 : if (alloc_output) {
- 526 3 : single_h->destroy_data_list =
- 527 3 : g_list_remove (single_h->destroy_data_list, output);
- 528 3 : ml_tensors_data_destroy (output);
+ 524 24 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) {
+ 525 4 : if (alloc_output) {
+ 526 4 : single_h->destroy_data_list =
+ 527 4 : g_list_remove (single_h->destroy_data_list, output);
+ 528 4 : ml_tensors_data_destroy (output);
529 : }
530 :
- 531 3 : if (single_h->state == JOIN_REQUESTED)
- 532 3 : goto exit;
+ 531 4 : if (single_h->state == JOIN_REQUESTED)
+ 532 4 : goto exit;
533 0 : goto wait_for_next;
534 : }
535 :
@@ -1298,10 +1298,10 @@
1236 80 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1237 :
1238 : /** Wait until invoke process is finished */
- 1239 674 : while (invoking) {
- 1240 594 : _ml_logd ("Wait 1 ms until invoke is finished and close the handle.");
- 1241 594 : g_usleep (1000);
- 1242 594 : invoking = single_h->invoking;
+ 1239 1325 : while (invoking) {
+ 1240 1245 : _ml_logd ("Wait 1 ms until invoke is finished and close the handle.");
+ 1241 1245 : g_usleep (1000);
+ 1242 1245 : invoking = single_h->invoking;
1243 : /**
1244 : * single_h->invoking is the only protected value here and we are
1245 : * doing a read-only operation and do not need to protect its value
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html
index 4c545954..27b12f63 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html
index 73a2e405..d03a406a 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html
index aac83602..447ce108 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c
@@ -337,8 +337,8 @@
275 : }
276 :
277 13 : if (*pipeline_desc != NULL) {
- 278 7 : _ml_logw (WARN_MSG_DPTR_SET_OVER, "char *pipeline_desc = NULL");
- 279 7 : *pipeline_desc = NULL;
+ 278 6 : _ml_logw (WARN_MSG_DPTR_SET_OVER, "char *pipeline_desc = NULL");
+ 279 6 : *pipeline_desc = NULL;
280 : }
281 :
282 13 : ret = ml_agent_pipeline_get_description (name, pipeline_desc);
@@ -390,7 +390,7 @@
328 : }
329 :
330 15 : if (*handle != NULL) {
- 331 10 : _ml_logw (WARN_MSG_DPTR_SET_OVER, "ml_service_h *handle = NULL");
+ 331 9 : _ml_logw (WARN_MSG_DPTR_SET_OVER, "ml_service_h *handle = NULL");
332 : }
333 15 : *handle = NULL;
334 :
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html
index c1cbce7b..ac712465 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html
index 8469fc46..25bc51e9 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html
index ff6cbe96..59e6156a 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html
index 16e5ae4e..ecace1dc 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
@@ -240,7 +240,7 @@
_mlrs_edge_event_cb |
- 54 |
+ 55 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html
index 126f23ac..5a5152c6 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
@@ -170,7 +170,7 @@
_mlrs_edge_event_cb |
- 54 |
+ 55 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html
index a81df4ce..6073a363 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c
@@ -613,17 +613,17 @@
551 : * @brief Edge event callback.
552 : */
553 : static int
- 554 54 : _mlrs_edge_event_cb (nns_edge_event_h event_h, void *user_data)
+ 554 55 : _mlrs_edge_event_cb (nns_edge_event_h event_h, void *user_data)
555 : {
- 556 54 : nns_edge_event_e event = NNS_EDGE_EVENT_UNKNOWN;
- 557 54 : nns_edge_data_h data_h = NULL;
- 558 54 : int ret = NNS_EDGE_ERROR_NONE;
+ 556 55 : nns_edge_event_e event = NNS_EDGE_EVENT_UNKNOWN;
+ 557 55 : nns_edge_data_h data_h = NULL;
+ 558 55 : int ret = NNS_EDGE_ERROR_NONE;
559 :
- 560 54 : ret = nns_edge_event_get_type (event_h, &event);
- 561 54 : if (NNS_EDGE_ERROR_NONE != ret)
- 562 54 : return ret;
+ 560 55 : ret = nns_edge_event_get_type (event_h, &event);
+ 561 55 : if (NNS_EDGE_ERROR_NONE != ret)
+ 562 55 : return ret;
563 :
- 564 54 : switch (event) {
+ 564 55 : switch (event) {
565 17 : case NNS_EDGE_EVENT_NEW_DATA_RECEIVED:
566 : {
567 17 : ret = nns_edge_event_parse_new_data (event_h, &data_h);
@@ -633,14 +633,14 @@
571 17 : ret = _mlrs_process_service_offloading (data_h, user_data);
572 17 : break;
573 : }
- 574 37 : default:
- 575 37 : break;
+ 574 38 : default:
+ 575 38 : break;
576 : }
577 :
- 578 54 : if (data_h)
+ 578 55 : if (data_h)
579 17 : nns_edge_data_destroy (data_h);
580 :
- 581 54 : return ret;
+ 581 55 : return ret;
582 : }
583 :
584 : /**
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html
index b3fd8061..2b3b2771 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html
index eeb62326..9d571e10 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html
index 5f056286..bba0ce23 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html
index d2852d94..896e32a2 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html
index f9d33bd9..e69a4335 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html
index ae90836c..1d683b46 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html
index 4fe3e23d..9a38bbcc 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html
index 8cca446e..79cca349 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html
index d454c6eb..e25799cb 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c
diff --git a/testresult/ml-api/cmd_line b/testresult/ml-api/cmd_line
index 799f7eee..7c3081a1 100644
--- a/testresult/ml-api/cmd_line
+++ b/testresult/ml-api/cmd_line
@@ -1 +1 @@
-genhtml -o result unittest-filtered.info -t 'ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a' --ignore-errors source -p /home/abuild/rpmbuild/BUILD
+genhtml -o result unittest-filtered.info -t 'ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e' --ignore-errors source -p /home/abuild/rpmbuild/BUILD
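For context, the HTML pages diffed in this changeset are regenerated from unittest-filtered.info with the genhtml command stored in cmd_line above; only the report title (the commit hash) changed. A minimal sketch of the usual lcov steps that produce that tracefile is given below; the capture directory and the --remove patterns are illustrative assumptions, and only the final genhtml invocation is taken from cmd_line.

    # Capture coverage data after the unit tests have run (capture directory is illustrative).
    lcov --capture --directory . --output-file unittest.info
    # Filter out external and test-harness sources (these patterns are assumptions).
    lcov --remove unittest.info '/usr/*' '*/tests/*' --output-file unittest-filtered.info
    # Render the HTML report diffed in this changeset (as recorded in cmd_line).
    genhtml -o result unittest-filtered.info \
      -t 'ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e' \
      --ignore-errors source -p /home/abuild/rpmbuild/BUILD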
diff --git a/testresult/ml-api/coverage_badge.svg b/testresult/ml-api/coverage_badge.svg
index 39c62830..6185bd92 100644
--- a/testresult/ml-api/coverage_badge.svg
+++ b/testresult/ml-api/coverage_badge.svg
diff --git a/testresult/ml-api/index-sort-f.html b/testresult/ml-api/index-sort-f.html
index 0a2632f7..8be5fb2b 100644
--- a/testresult/ml-api/index-sort-f.html
+++ b/testresult/ml-api/index-sort-f.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e
@@ -82,14 +82,14 @@
capi-machine-learning-inference-1.8.6/c/src |
-
+
|
- 83.3 % |
+ 83.1 % |
5160 |
- 4296 |
- 96.9 % |
+ 4289 |
+ 96.6 % |
294 |
- 285 |
+ 284 |
diff --git a/testresult/ml-api/index-sort-l.html b/testresult/ml-api/index-sort-l.html
index d72492da..756639a2 100644
--- a/testresult/ml-api/index-sort-l.html
+++ b/testresult/ml-api/index-sort-l.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e
@@ -82,14 +82,14 @@
capi-machine-learning-inference-1.8.6/c/src |
-
+
|
- 83.3 % |
+ 83.1 % |
5160 |
- 4296 |
- 96.9 % |
+ 4289 |
+ 96.6 % |
294 |
- 285 |
+ 284 |
diff --git a/testresult/ml-api/index.html b/testresult/ml-api/index.html
index 9840acb2..29acae45 100644
--- a/testresult/ml-api/index.html
+++ b/testresult/ml-api/index.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 gichan-jang/api#5c9eb479845ba5c405e5e3b857837751ef77da8a
+ LCOV - ML API 1.8.6-0 gichan-jang/api#2c94b63b2f5ed400eaacdf47332377c3ff29545e
@@ -82,14 +82,14 @@
capi-machine-learning-inference-1.8.6/c/src |
-
+
|
- 83.3 % |
+ 83.1 % |
5160 |
- 4296 |
- 96.9 % |
+ 4289 |
+ 96.6 % |
294 |
- 285 |
+ 284 |