diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html
index e9b326b5..e878a8bd 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-f.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src
@@ -79,18 +79,6 @@
Total |
Hit |
- ml-api-inference-single.c |
- 75.1 % |
- 855 |
- 642 |
- 92.3 % |
- 39 |
- 36 |
ml-api-inference-pipeline.c |
@@ -103,6 +91,18 @@
| 84 |
79 |
+ ml-api-inference-single.c |
+ 75.9 % |
+ 855 |
+ 649 |
+ 94.9 % |
+ 39 |
+ 37 |
ml-api-common.c |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html
index ca4194ce..603073e8 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index-sort-l.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src
@@ -82,14 +82,14 @@
ml-api-inference-single.c |
- 75.1 % |
+ 75.9 % |
855 |
- 642 |
- 92.3 % |
+ 649 |
+ 94.9 % |
39 |
- 36 |
+ 37 |
ml-api-service-offloading.c |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html
index fa58fefd..e25b03bb 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/index.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src
@@ -118,14 +118,14 @@
ml-api-inference-single.c |
- 75.1 % |
+ 75.9 % |
855 |
- 642 |
- 92.3 % |
+ 649 |
+ 94.9 % |
39 |
- 36 |
+ 37 |
ml-api-service-agent-client.c |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html
index 5e5f226c..3e529e86 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
@@ -317,7 +317,7 @@
ml_tensors_data_destroy |
- 485 |
+ 483 |
@@ -380,77 +380,77 @@
ml_tensors_data_create |
- 6492 |
+ 6567 |
ml_tensors_info_validate |
- 6586 |
+ 6661 |
_ml_tensors_info_create_from |
- 6762 |
+ 6837 |
ml_tensors_info_clone |
- 6775 |
+ 6850 |
_ml_tensors_data_destroy_internal |
- 13057 |
+ 13206 |
_ml_tensors_data_create_no_alloc |
- 13082 |
+ 13233 |
ml_tensors_info_destroy |
- 19893 |
+ 20117 |
ml_tensors_info_create |
- 19907 |
+ 20133 |
_ml_tensors_info_free |
- 19932 |
+ 20156 |
_ml_tensors_info_initialize |
- 19941 |
+ 20167 |
_ml_tensors_info_create_internal |
- 19942 |
+ 20168 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html
index 29b1ba56..800312f3 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c - functions
@@ -163,14 +163,14 @@
_ml_tensors_data_create_no_alloc |
- 13082 |
+ 13233 |
_ml_tensors_data_destroy_internal |
- 13057 |
+ 13206 |
@@ -184,28 +184,28 @@
_ml_tensors_info_create_from |
- 6762 |
+ 6837 |
_ml_tensors_info_create_internal |
- 19942 |
+ 20168 |
_ml_tensors_info_free |
- 19932 |
+ 20156 |
_ml_tensors_info_initialize |
- 19941 |
+ 20167 |
@@ -324,14 +324,14 @@
ml_tensors_data_create |
- 6492 |
+ 6567 |
ml_tensors_data_destroy |
- 485 |
+ 483 |
@@ -359,14 +359,14 @@
ml_tensors_info_clone |
- 6775 |
+ 6850 |
ml_tensors_info_create |
- 19907 |
+ 20133 |
@@ -380,7 +380,7 @@
ml_tensors_info_destroy |
- 19893 |
+ 20117 |
@@ -450,7 +450,7 @@
ml_tensors_info_validate |
- 6586 |
+ 6661 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html
index 921d2ff9..e48cac8a 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-common.c
@@ -184,61 +184,61 @@
122 : * @brief Internal function to create tensors-info handle.
123 : */
124 : static int
- 125 19942 : _ml_tensors_info_create_internal (ml_tensors_info_h * info, bool extended)
+ 125 20168 : _ml_tensors_info_create_internal (ml_tensors_info_h * info, bool extended)
126 : {
127 : ml_tensors_info_s *tensors_info;
128 :
- 129 19942 : check_feature_state (ML_FEATURE);
+ 129 20168 : check_feature_state (ML_FEATURE);
130 :
- 131 19942 : if (!info)
+ 131 20168 : if (!info)
132 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
133 : "The parameter, info, is NULL. Provide a valid pointer.");
134 :
- 135 19940 : *info = tensors_info = g_new0 (ml_tensors_info_s, 1);
- 136 19940 : if (tensors_info == NULL)
+ 135 20166 : *info = tensors_info = g_new0 (ml_tensors_info_s, 1);
+ 136 20166 : if (tensors_info == NULL)
137 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
138 : "Failed to allocate the tensors info handle. Out of memory?");
139 :
- 140 19940 : g_mutex_init (&tensors_info->lock);
- 141 19940 : tensors_info->is_extended = extended;
+ 140 20166 : g_mutex_init (&tensors_info->lock);
+ 141 20166 : tensors_info->is_extended = extended;
142 :
143 : /* init tensors info struct */
- 144 19940 : return _ml_tensors_info_initialize (tensors_info);
+ 144 20166 : return _ml_tensors_info_initialize (tensors_info);
145 : }
146 :
147 : /**
148 : * @brief Creates new tensors-info handle and copies tensors information.
149 : */
150 : int
- 151 6762 : _ml_tensors_info_create_from (const ml_tensors_info_h in,
+ 151 6837 : _ml_tensors_info_create_from (const ml_tensors_info_h in,
152 : ml_tensors_info_h * out)
153 : {
154 : ml_tensors_info_s *_info;
155 : int status;
156 :
- 157 6762 : if (!in || !out)
+ 157 6837 : if (!in || !out)
158 0 : return ML_ERROR_INVALID_PARAMETER;
159 :
- 160 6762 : _info = (ml_tensors_info_s *) in;
+ 160 6837 : _info = (ml_tensors_info_s *) in;
161 :
- 162 6762 : if (_info->is_extended)
+ 162 6837 : if (_info->is_extended)
163 7 : status = ml_tensors_info_create_extended (out);
164 : else
- 165 6755 : status = ml_tensors_info_create (out);
+ 165 6830 : status = ml_tensors_info_create (out);
166 :
- 167 6762 : if (status == ML_ERROR_NONE)
- 168 6762 : status = ml_tensors_info_clone (*out, in);
+ 167 6837 : if (status == ML_ERROR_NONE)
+ 168 6837 : status = ml_tensors_info_clone (*out, in);
169 :
- 170 6762 : return status;
+ 170 6837 : return status;
171 : }
172 :
173 : /**
174 : * @brief Allocates a tensors information handle with default value.
175 : */
176 : int
- 177 19907 : ml_tensors_info_create (ml_tensors_info_h * info)
+ 177 20133 : ml_tensors_info_create (ml_tensors_info_h * info)
178 : {
- 179 19907 : return _ml_tensors_info_create_internal (info, false);
+ 179 20133 : return _ml_tensors_info_create_internal (info, false);
180 : }
181 :
182 : /**
@@ -254,53 +254,53 @@
192 : * @brief Frees the given handle of a tensors information.
193 : */
194 : int
- 195 19893 : ml_tensors_info_destroy (ml_tensors_info_h info)
+ 195 20117 : ml_tensors_info_destroy (ml_tensors_info_h info)
196 : {
197 : ml_tensors_info_s *tensors_info;
198 :
- 199 19893 : check_feature_state (ML_FEATURE);
+ 199 20117 : check_feature_state (ML_FEATURE);
200 :
- 201 19893 : tensors_info = (ml_tensors_info_s *) info;
+ 201 20117 : tensors_info = (ml_tensors_info_s *) info;
202 :
- 203 19893 : if (!tensors_info)
+ 203 20117 : if (!tensors_info)
204 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
205 : "The parameter, info, is NULL. Provide a valid pointer.");
206 :
- 207 19892 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
- 208 19892 : _ml_tensors_info_free (tensors_info);
- 209 19892 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ 207 20116 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
+ 208 20116 : _ml_tensors_info_free (tensors_info);
+ 209 20116 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
210 :
- 211 19892 : g_mutex_clear (&tensors_info->lock);
- 212 19892 : g_free (tensors_info);
+ 211 20116 : g_mutex_clear (&tensors_info->lock);
+ 212 20116 : g_free (tensors_info);
213 :
- 214 19892 : return ML_ERROR_NONE;
+ 214 20116 : return ML_ERROR_NONE;
215 : }
216 :
217 : /**
218 : * @brief Validates the given tensors info is valid.
219 : */
220 : int
- 221 6586 : ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid)
+ 221 6661 : ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid)
222 : {
223 : ml_tensors_info_s *tensors_info;
224 :
- 225 6586 : check_feature_state (ML_FEATURE);
+ 225 6661 : check_feature_state (ML_FEATURE);
226 :
- 227 6586 : if (!valid)
+ 227 6661 : if (!valid)
228 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
229 : "The data-return parameter, valid, is NULL. It should be a pointer pre-allocated by the caller.");
230 :
- 231 6585 : tensors_info = (ml_tensors_info_s *) info;
+ 231 6660 : tensors_info = (ml_tensors_info_s *) info;
232 :
- 233 6585 : if (!tensors_info)
+ 233 6660 : if (!tensors_info)
234 3 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
235 : "The input parameter, tensors_info, is NULL. It should be a valid ml_tensors_info_h, which is usually created by ml_tensors_info_create().");
236 :
- 237 6582 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
- 238 6582 : *valid = gst_tensors_info_validate (&tensors_info->info);
- 239 6582 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ 237 6657 : G_LOCK_UNLESS_NOLOCK (*tensors_info);
+ 238 6657 : *valid = gst_tensors_info_validate (&tensors_info->info);
+ 239 6657 : G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
240 :
- 241 6582 : return ML_ERROR_NONE;
+ 241 6657 : return ML_ERROR_NONE;
242 : }
243 :
244 : /**
@@ -711,15 +711,15 @@
649 : * @brief Initializes the tensors information with default value.
650 : */
651 : int
- 652 19941 : _ml_tensors_info_initialize (ml_tensors_info_s * info)
+ 652 20167 : _ml_tensors_info_initialize (ml_tensors_info_s * info)
653 : {
- 654 19941 : if (!info)
+ 654 20167 : if (!info)
655 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
656 : "The parameter, info, is NULL. Provide a valid pointer.");
657 :
- 658 19940 : gst_tensors_info_init (&info->info);
+ 658 20166 : gst_tensors_info_init (&info->info);
659 :
- 660 19940 : return ML_ERROR_NONE;
+ 660 20166 : return ML_ERROR_NONE;
661 : }
662 :
663 : /**
@@ -727,12 +727,12 @@
665 : * @note This does not touch the lock. The caller should lock.
666 : */
667 : void
- 668 19932 : _ml_tensors_info_free (ml_tensors_info_s * info)
+ 668 20156 : _ml_tensors_info_free (ml_tensors_info_s * info)
669 : {
- 670 19932 : if (!info)
+ 670 20156 : if (!info)
671 0 : return;
672 :
- 673 19932 : gst_tensors_info_free (&info->info);
+ 673 20156 : gst_tensors_info_free (&info->info);
674 : }
675 :
676 : /**
@@ -742,21 +742,21 @@
680 : * @return @c 0 on success. Otherwise a negative error value.
681 : */
682 : int
- 683 13057 : _ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data)
+ 683 13206 : _ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data)
684 : {
- 685 13057 : int status = ML_ERROR_NONE;
+ 685 13206 : int status = ML_ERROR_NONE;
686 : ml_tensors_data_s *_data;
687 : guint i;
688 :
- 689 13057 : if (data == NULL)
+ 689 13206 : if (data == NULL)
690 19 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
691 : "The parameter, data, is NULL. It should be a valid ml_tensors_data_h handle, which is usually created by ml_tensors_data_create ().");
692 :
- 693 13038 : _data = (ml_tensors_data_s *) data;
- 694 13038 : G_LOCK_UNLESS_NOLOCK (*_data);
+ 693 13187 : _data = (ml_tensors_data_s *) data;
+ 694 13187 : G_LOCK_UNLESS_NOLOCK (*_data);
695 :
- 696 13038 : if (free_data) {
- 697 466 : if (_data->destroy) {
+ 696 13187 : if (free_data) {
+ 697 464 : if (_data->destroy) {
698 0 : status = _data->destroy (_data, _data->user_data);
699 0 : if (status != ML_ERROR_NONE) {
700 0 : G_UNLOCK_UNLESS_NOLOCK (*_data);
@@ -765,22 +765,22 @@
703 : status);
704 : }
705 : } else {
- 706 119762 : for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
- 707 119296 : if (_data->tensors[i].data) {
- 708 690 : g_free (_data->tensors[i].data);
- 709 690 : _data->tensors[i].data = NULL;
+ 706 119248 : for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
+ 707 118784 : if (_data->tensors[i].data) {
+ 708 688 : g_free (_data->tensors[i].data);
+ 709 688 : _data->tensors[i].data = NULL;
710 : }
711 : }
712 : }
713 : }
714 :
- 715 13038 : if (_data->info)
- 716 13038 : ml_tensors_info_destroy (_data->info);
+ 715 13187 : if (_data->info)
+ 716 13187 : ml_tensors_info_destroy (_data->info);
717 :
- 718 13038 : G_UNLOCK_UNLESS_NOLOCK (*_data);
- 719 13038 : g_mutex_clear (&_data->lock);
- 720 13038 : g_free (_data);
- 721 13038 : return status;
+ 718 13187 : G_UNLOCK_UNLESS_NOLOCK (*_data);
+ 719 13187 : g_mutex_clear (&_data->lock);
+ 720 13187 : g_free (_data);
+ 721 13187 : return status;
722 : }
723 :
724 : /**
@@ -788,15 +788,15 @@
726 : * @note This does not touch the lock
727 : */
728 : int
- 729 485 : ml_tensors_data_destroy (ml_tensors_data_h data)
+ 729 483 : ml_tensors_data_destroy (ml_tensors_data_h data)
730 : {
731 : int ret;
- 732 485 : check_feature_state (ML_FEATURE);
- 733 485 : ret = _ml_tensors_data_destroy_internal (data, TRUE);
- 734 485 : if (ret != ML_ERROR_NONE)
+ 732 483 : check_feature_state (ML_FEATURE);
+ 733 483 : ret = _ml_tensors_data_destroy_internal (data, TRUE);
+ 734 483 : if (ret != ML_ERROR_NONE)
735 19 : _ml_error_report_return_continue (ret,
736 : "Call to _ml_tensors_data_destroy_internal failed with %d", ret);
- 737 466 : return ret;
+ 737 464 : return ret;
738 : }
739 :
740 : /**
@@ -804,56 +804,56 @@
742 : * @note Memory for tensor data buffers is not allocated.
743 : */
744 : int
- 745 13082 : _ml_tensors_data_create_no_alloc (const ml_tensors_info_h info,
+ 745 13233 : _ml_tensors_data_create_no_alloc (const ml_tensors_info_h info,
746 : ml_tensors_data_h * data)
747 : {
748 : ml_tensors_data_s *_data;
749 : ml_tensors_info_s *_info;
750 : guint i;
- 751 13082 : int status = ML_ERROR_NONE;
+ 751 13233 : int status = ML_ERROR_NONE;
752 :
- 753 13082 : check_feature_state (ML_FEATURE);
+ 753 13233 : check_feature_state (ML_FEATURE);
754 :
- 755 13082 : if (data == NULL)
+ 755 13233 : if (data == NULL)
756 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
757 : "The parameter, data, is NULL. It should be a valid ml_tensors_info_h handle that may hold a space for ml_tensors_info_h. E.g., ml_tensors_data_h data; _ml_tensors_data_create_no_alloc (info, &data);.");
758 :
759 : /* init null */
- 760 13081 : *data = NULL;
+ 760 13232 : *data = NULL;
761 :
- 762 13081 : _data = g_new0 (ml_tensors_data_s, 1);
- 763 13081 : if (!_data)
+ 762 13232 : _data = g_new0 (ml_tensors_data_s, 1);
+ 763 13232 : if (!_data)
764 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
765 : "Failed to allocate memory for tensors data. Probably the system is out of memory.");
766 :
- 767 13081 : g_mutex_init (&_data->lock);
+ 767 13232 : g_mutex_init (&_data->lock);
768 :
- 769 13081 : _info = (ml_tensors_info_s *) info;
- 770 13081 : if (_info != NULL) {
- 771 6753 : status = _ml_tensors_info_create_from (info, &_data->info);
- 772 6753 : if (status != ML_ERROR_NONE) {
+ 769 13232 : _info = (ml_tensors_info_s *) info;
+ 770 13232 : if (_info != NULL) {
+ 771 6828 : status = _ml_tensors_info_create_from (info, &_data->info);
+ 772 6828 : if (status != ML_ERROR_NONE) {
773 0 : _ml_error_report_continue
774 : ("Failed to create internal information handle for tensors data.");
775 0 : goto error;
776 : }
777 :
- 778 6753 : G_LOCK_UNLESS_NOLOCK (*_info);
- 779 6753 : _data->num_tensors = _info->info.num_tensors;
- 780 13946 : for (i = 0; i < _data->num_tensors; i++) {
- 781 7193 : _data->tensors[i].size = gst_tensors_info_get_size (&_info->info, i);
- 782 7193 : _data->tensors[i].data = NULL;
+ 778 6828 : G_LOCK_UNLESS_NOLOCK (*_info);
+ 779 6828 : _data->num_tensors = _info->info.num_tensors;
+ 780 14096 : for (i = 0; i < _data->num_tensors; i++) {
+ 781 7268 : _data->tensors[i].size = gst_tensors_info_get_size (&_info->info, i);
+ 782 7268 : _data->tensors[i].data = NULL;
783 : }
- 784 6753 : G_UNLOCK_UNLESS_NOLOCK (*_info);
+ 784 6828 : G_UNLOCK_UNLESS_NOLOCK (*_info);
785 : }
786 :
- 787 6328 : error:
- 788 13081 : if (status == ML_ERROR_NONE) {
- 789 13081 : *data = _data;
+ 787 6404 : error:
+ 788 13232 : if (status == ML_ERROR_NONE) {
+ 789 13232 : *data = _data;
790 : } else {
791 0 : _ml_tensors_data_destroy_internal (_data, FALSE);
792 : }
793 :
- 794 13081 : return status;
+ 794 13232 : return status;
795 : }
796 :
797 : /**
@@ -973,48 +973,48 @@
911 : * @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
912 : */
913 : int
- 914 6492 : ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h * data)
+ 914 6567 : ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h * data)
915 : {
- 916 6492 : gint status = ML_ERROR_STREAMS_PIPE;
- 917 6492 : ml_tensors_data_s *_data = NULL;
+ 916 6567 : gint status = ML_ERROR_STREAMS_PIPE;
+ 917 6567 : ml_tensors_data_s *_data = NULL;
918 : guint i;
919 : bool valid;
920 :
- 921 12984 : check_feature_state (ML_FEATURE);
+ 921 13134 : check_feature_state (ML_FEATURE);
922 :
- 923 6492 : if (info == NULL)
+ 923 6567 : if (info == NULL)
924 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
925 : "The parameter, info, is NULL. It should be a valid pointer of ml_tensors_info_h, which is usually created by ml_tensors_info_create().");
- 926 6491 : if (data == NULL)
+ 926 6566 : if (data == NULL)
927 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
928 : "The parameter, data, is NULL. It should be a valid space to hold a ml_tensors_data_h handle. E.g., ml_tensors_data_h data; ml_tensors_data_create (info, &data);.");
929 :
- 930 6490 : status = ml_tensors_info_validate (info, &valid);
- 931 6490 : if (status != ML_ERROR_NONE)
+ 930 6565 : status = ml_tensors_info_validate (info, &valid);
+ 931 6565 : if (status != ML_ERROR_NONE)
932 0 : _ml_error_report_return_continue (status,
933 : "ml_tensors_info_validate() has reported that the parameter, info, is not NULL, but its contents are not valid. The user must provide a valid tensor information with it.");
- 934 6490 : if (!valid)
+ 934 6565 : if (!valid)
935 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
936 : "The parameter, info, is not NULL, but its contents are not valid. The user must provide a valid tensor information with it. Probably, there is an entry that is not allocated or dimension/type information not available. The given info should have valid number of tensors, entries of every tensor along with its type and dimension info.");
937 :
938 : status =
- 939 6489 : _ml_tensors_data_create_no_alloc (info, (ml_tensors_data_h *) & _data);
+ 939 6564 : _ml_tensors_data_create_no_alloc (info, (ml_tensors_data_h *) & _data);
940 :
- 941 6489 : if (status != ML_ERROR_NONE) {
+ 941 6564 : if (status != ML_ERROR_NONE) {
942 0 : _ml_error_report_return_continue (status,
943 : "Failed to allocate tensor data based on the given info with the call to _ml_tensors_data_create_no_alloc (): %d. Check if it's out-of-memory.",
944 : status);
945 : }
946 :
- 947 13200 : for (i = 0; i < _data->num_tensors; i++) {
- 948 6711 : _data->tensors[i].data = g_malloc0 (_data->tensors[i].size);
- 949 6711 : if (_data->tensors[i].data == NULL) {
+ 947 13350 : for (i = 0; i < _data->num_tensors; i++) {
+ 948 6786 : _data->tensors[i].data = g_malloc0 (_data->tensors[i].size);
+ 949 6786 : if (_data->tensors[i].data == NULL) {
950 0 : goto failed_oom;
951 : }
952 : }
953 :
- 954 6489 : *data = _data;
- 955 6489 : return ML_ERROR_NONE;
+ 954 6564 : *data = _data;
+ 955 6564 : return ML_ERROR_NONE;
956 :
957 0 : failed_oom:
958 0 : _ml_tensors_data_destroy_internal (_data, TRUE);
@@ -1115,29 +1115,29 @@
1053 : * @brief Copies tensor meta info.
1054 : */
1055 : int
- 1056 6775 : ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src)
+ 1056 6850 : ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src)
1057 : {
1058 : ml_tensors_info_s *dest_info, *src_info;
- 1059 6775 : int status = ML_ERROR_NONE;
+ 1059 6850 : int status = ML_ERROR_NONE;
1060 :
- 1061 6775 : check_feature_state (ML_FEATURE);
+ 1061 6850 : check_feature_state (ML_FEATURE);
1062 :
- 1063 6775 : dest_info = (ml_tensors_info_s *) dest;
- 1064 6775 : src_info = (ml_tensors_info_s *) src;
+ 1063 6850 : dest_info = (ml_tensors_info_s *) dest;
+ 1064 6850 : src_info = (ml_tensors_info_s *) src;
1065 :
- 1066 6775 : if (!dest_info)
+ 1066 6850 : if (!dest_info)
1067 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1068 : "The parameter, dest, is NULL. It should be an allocated handle (ml_tensors_info_h), usually allocated by ml_tensors_info_create ().");
- 1069 6774 : if (!src_info)
+ 1069 6849 : if (!src_info)
1070 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1071 : "The parameter, src, is NULL. It should be a handle (ml_tensors_info_h) with valid data.");
1072 :
- 1073 6773 : G_LOCK_UNLESS_NOLOCK (*dest_info);
- 1074 6773 : G_LOCK_UNLESS_NOLOCK (*src_info);
+ 1073 6848 : G_LOCK_UNLESS_NOLOCK (*dest_info);
+ 1074 6848 : G_LOCK_UNLESS_NOLOCK (*src_info);
1075 :
- 1076 6773 : if (gst_tensors_info_validate (&src_info->info)) {
- 1077 6773 : dest_info->is_extended = src_info->is_extended;
- 1078 6773 : gst_tensors_info_copy (&dest_info->info, &src_info->info);
+ 1076 6848 : if (gst_tensors_info_validate (&src_info->info)) {
+ 1077 6848 : dest_info->is_extended = src_info->is_extended;
+ 1078 6848 : gst_tensors_info_copy (&dest_info->info, &src_info->info);
1079 : } else {
1080 0 : _ml_error_report
1081 : ("The parameter, src, is a ml_tensors_info_h handle without valid data. Every tensor-info of tensors-info should have a valid type and dimension information and the number of tensors should be between 1 and %d.",
@@ -1145,10 +1145,10 @@
1083 0 : status = ML_ERROR_INVALID_PARAMETER;
1084 : }
1085 :
- 1086 6773 : G_UNLOCK_UNLESS_NOLOCK (*src_info);
- 1087 6773 : G_UNLOCK_UNLESS_NOLOCK (*dest_info);
+ 1086 6848 : G_UNLOCK_UNLESS_NOLOCK (*src_info);
+ 1087 6848 : G_UNLOCK_UNLESS_NOLOCK (*dest_info);
1088 :
- 1089 6773 : return status;
+ 1089 6848 : return status;
1090 : }
1091 :
1092 : /**
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html
index df41abb6..b745c689 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
@@ -65,28 +65,28 @@
_ml_tensors_info_copy_from_ml |
- 6488 |
+ 6563 |
_ml_tensors_info_create_from_gst |
- 13001 |
+ 13152 |
_ml_tensors_info_copy_from_gst |
- 13049 |
+ 13200 |
gst_info_is_extended |
- 26046 |
+ 26348 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html
index 587427f2..4e158874 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c - functions
@@ -65,28 +65,28 @@
_ml_tensors_info_copy_from_gst |
- 13049 |
+ 13200 |
_ml_tensors_info_copy_from_ml |
- 6488 |
+ 6563 |
_ml_tensors_info_create_from_gst |
- 13001 |
+ 13152 |
gst_info_is_extended |
- 26046 |
+ 26348 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html
index b0c82a72..6c08cfc3 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-internal.c
@@ -83,54 +83,54 @@
21 : * @brief Check tensor-info has extended rank value.
22 : */
23 : static gboolean
- 24 26046 : gst_info_is_extended (const GstTensorsInfo * gst_info)
+ 24 26348 : gst_info_is_extended (const GstTensorsInfo * gst_info)
25 : {
26 : GstTensorInfo *_info;
27 : guint i;
28 :
- 29 53394 : for (i = 0; i < gst_info->num_tensors; i++) {
- 30 27372 : _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i);
- 31 27372 : if (!_info)
+ 29 53998 : for (i = 0; i < gst_info->num_tensors; i++) {
+ 30 27674 : _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i);
+ 31 27674 : if (!_info)
32 0 : _ml_error_report_return (FALSE,
33 : "The parameter, gst_info, has invalid number of tensors. The max number of tensors is "
34 : NNS_TENSOR_SIZE_LIMIT_STR);
35 :
- 36 27372 : if (_info->dimension[ML_TENSOR_RANK_LIMIT_PREV] > 0)
+ 36 27674 : if (_info->dimension[ML_TENSOR_RANK_LIMIT_PREV] > 0)
37 24 : return TRUE;
38 : }
39 :
- 40 26022 : return FALSE;
+ 40 26324 : return FALSE;
41 : }
42 :
43 : /**
44 : * @brief Allocates a tensors information handle from gst info.
45 : */
46 : int
- 47 13001 : _ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info,
+ 47 13152 : _ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info,
48 : GstTensorsInfo * gst_info)
49 : {
50 : gboolean is_extended;
51 :
- 52 13001 : if (!ml_info)
+ 52 13152 : if (!ml_info)
53 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
54 : "The parameter, ml_info, is NULL. It should be a valid ml_tensors_info_h instance usually created by ml_tensors_info_create(). This could be an internal bug of ML API.");
55 :
- 56 13000 : if (!gst_info)
+ 56 13151 : if (!gst_info)
57 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
58 : "The parameter, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This could be an internal bug of ML API.");
59 :
- 60 12999 : is_extended = gst_info_is_extended (gst_info);
- 61 12999 : if (is_extended)
+ 60 13150 : is_extended = gst_info_is_extended (gst_info);
+ 61 13150 : if (is_extended)
62 7 : _ml_error_report_return_continue_iferr
63 : (ml_tensors_info_create_extended (ml_info),
64 : "The call to ml_tensors_info_create_extended has failed with %d.",
65 : _ERRNO);
66 : else
- 67 12992 : _ml_error_report_return_continue_iferr (ml_tensors_info_create (ml_info),
+ 67 13143 : _ml_error_report_return_continue_iferr (ml_tensors_info_create (ml_info),
68 : "The call to ml_tensors_info_create has failed with %d.", _ERRNO);
69 :
- 70 12999 : _ml_tensors_info_copy_from_gst (*ml_info, gst_info);
- 71 12999 : return ML_ERROR_NONE;
+ 70 13150 : _ml_tensors_info_copy_from_gst (*ml_info, gst_info);
+ 71 13150 : return ML_ERROR_NONE;
72 : }
73 :
74 : /**
@@ -138,26 +138,26 @@
76 : * @bug Thread safety required. Check its internal users first!
77 : */
78 : int
- 79 13049 : _ml_tensors_info_copy_from_gst (ml_tensors_info_h ml_info,
+ 79 13200 : _ml_tensors_info_copy_from_gst (ml_tensors_info_h ml_info,
80 : const GstTensorsInfo * gst_info)
81 : {
82 : ml_tensors_info_s *_info;
83 :
- 84 13049 : if (!ml_info)
+ 84 13200 : if (!ml_info)
85 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
86 : "The parmater, ml_info, is NULL. It should be a valid ml_tensors_info_s instance, usually created by ml_tensors_info_create(). This is probably an internal bug of ML API.");
- 87 13048 : if (!gst_info)
+ 87 13199 : if (!gst_info)
88 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
89 : "The parmater, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This is probably an internal bug of ML API.");
90 :
- 91 13047 : _info = (ml_tensors_info_s *) ml_info;
+ 91 13198 : _info = (ml_tensors_info_s *) ml_info;
92 :
- 93 13047 : G_LOCK_UNLESS_NOLOCK (*_info);
- 94 13047 : _info->is_extended = gst_info_is_extended (gst_info);
- 95 13047 : gst_tensors_info_copy (&_info->info, gst_info);
- 96 13047 : G_UNLOCK_UNLESS_NOLOCK (*_info);
+ 93 13198 : G_LOCK_UNLESS_NOLOCK (*_info);
+ 94 13198 : _info->is_extended = gst_info_is_extended (gst_info);
+ 95 13198 : gst_tensors_info_copy (&_info->info, gst_info);
+ 96 13198 : G_UNLOCK_UNLESS_NOLOCK (*_info);
97 :
- 98 13047 : return ML_ERROR_NONE;
+ 98 13198 : return ML_ERROR_NONE;
99 : }
100 :
101 : /**
@@ -165,25 +165,25 @@
103 : * @bug Thread safety required. Check its internal users first!
104 : */
105 : int
- 106 6488 : _ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info,
+ 106 6563 : _ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info,
107 : const ml_tensors_info_h ml_info)
108 : {
109 : ml_tensors_info_s *_info;
110 :
- 111 6488 : if (!ml_info)
+ 111 6563 : if (!ml_info)
112 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
113 : "The parmater, ml_info, is NULL. It should be a valid ml_tensors_info_s instance, usually created by ml_tensors_info_create(). This is probably an internal bug of ML API.");
- 114 6487 : if (!gst_info)
+ 114 6562 : if (!gst_info)
115 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
116 : "The parmater, gst_info, is NULL. It should be a valid GstTensorsInfo instance. This is probably an internal bug of ML API.");
117 :
- 118 6486 : _info = (ml_tensors_info_s *) ml_info;
+ 118 6561 : _info = (ml_tensors_info_s *) ml_info;
119 :
- 120 6486 : G_LOCK_UNLESS_NOLOCK (*_info);
- 121 6486 : gst_tensors_info_copy (gst_info, &_info->info);
- 122 6486 : G_UNLOCK_UNLESS_NOLOCK (*_info);
+ 120 6561 : G_LOCK_UNLESS_NOLOCK (*_info);
+ 121 6561 : gst_tensors_info_copy (gst_info, &_info->info);
+ 122 6561 : G_UNLOCK_UNLESS_NOLOCK (*_info);
123 :
- 124 6486 : return ML_ERROR_NONE;
+ 124 6561 : return ML_ERROR_NONE;
125 : }
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html
index 22afb741..6b5de2b6 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
@@ -604,49 +604,49 @@
_pipe_src_cb_need_data |
- 6214 |
+ 6289 |
get_app_src_callback |
- 6214 |
+ 6289 |
ml_pipeline_src_get_tensors_info |
- 6218 |
+ 6293 |
ml_pipeline_src_input_data |
- 6302 |
+ 6377 |
cb_sink_event |
- 6328 |
+ 6404 |
ml_pipeline_src_parse_tensors_info |
- 12562 |
+ 12712 |
get_tensors_info_from_caps |
- 12590 |
+ 12740 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html
index 02c7c32e..0a7bf86a 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c - functions
@@ -100,7 +100,7 @@
_pipe_src_cb_need_data |
- 6214 |
+ 6289 |
@@ -128,7 +128,7 @@
cb_sink_event |
- 6328 |
+ 6404 |
@@ -184,7 +184,7 @@
get_app_src_callback |
- 6214 |
+ 6289 |
@@ -198,7 +198,7 @@
get_tensors_info_from_caps |
- 12590 |
+ 12740 |
@@ -492,21 +492,21 @@
ml_pipeline_src_get_tensors_info |
- 6218 |
+ 6293 |
ml_pipeline_src_input_data |
- 6302 |
+ 6377 |
ml_pipeline_src_parse_tensors_info |
- 12562 |
+ 12712 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html
index 42d35979..e5b49b5f 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-pipeline.c
@@ -315,69 +315,69 @@
253 : * @brief Internal function to get the tensors info from the element caps.
254 : */
255 : static gboolean
- 256 12590 : get_tensors_info_from_caps (GstCaps * caps, GstTensorsInfo * info,
+ 256 12740 : get_tensors_info_from_caps (GstCaps * caps, GstTensorsInfo * info,
257 : gboolean * is_flexible)
258 : {
259 : GstStructure *s;
260 : GstTensorsConfig config;
261 : guint i, n_caps;
- 262 12590 : gboolean found = FALSE;
+ 262 12740 : gboolean found = FALSE;
263 :
- 264 12590 : n_caps = gst_caps_get_size (caps);
+ 264 12740 : n_caps = gst_caps_get_size (caps);
265 :
- 266 12597 : for (i = 0; i < n_caps; i++) {
- 267 12591 : s = gst_caps_get_structure (caps, i);
- 268 12591 : found = gst_tensors_config_from_structure (&config, s);
+ 266 12747 : for (i = 0; i < n_caps; i++) {
+ 267 12741 : s = gst_caps_get_structure (caps, i);
+ 268 12741 : found = gst_tensors_config_from_structure (&config, s);
269 :
- 270 12591 : if (found) {
- 271 12584 : gst_tensors_info_free (info);
- 272 12584 : gst_tensors_info_copy (info, &config.info);
- 273 12584 : *is_flexible = gst_tensors_config_is_flexible (&config);
+ 270 12741 : if (found) {
+ 271 12734 : gst_tensors_info_free (info);
+ 272 12734 : gst_tensors_info_copy (info, &config.info);
+ 273 12734 : *is_flexible = gst_tensors_config_is_flexible (&config);
274 : }
275 :
- 276 12591 : gst_tensors_config_free (&config);
- 277 12591 : if (found)
- 278 12584 : break;
+ 276 12741 : gst_tensors_config_free (&config);
+ 277 12741 : if (found)
+ 278 12734 : break;
279 : }
280 :
- 281 12590 : return found;
+ 281 12740 : return found;
282 : }
283 :
284 : /**
285 : * @brief Handle a sink element for registered ml_pipeline_sink_cb
286 : */
287 : static void
- 288 6328 : cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
+ 288 6404 : cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
289 : {
- 290 6328 : ml_pipeline_element *elem = user_data;
+ 290 6404 : ml_pipeline_element *elem = user_data;
291 :
292 : /** @todo CRITICAL if the pipeline is being killed, don't proceed! */
293 : GstMemory *mem[ML_TENSOR_SIZE_LIMIT];
294 : GstMapInfo map[ML_TENSOR_SIZE_LIMIT];
295 : guint i, num_tensors;
296 : GList *l;
- 297 6328 : ml_tensors_data_s *_data = NULL;
+ 297 6404 : ml_tensors_data_s *_data = NULL;
298 : GstTensorsInfo gst_info;
299 : int status;
300 :
- 301 6328 : gst_tensors_info_init (&gst_info);
- 302 6328 : gst_info.num_tensors = num_tensors = gst_tensor_buffer_get_count (b);
+ 301 6404 : gst_tensors_info_init (&gst_info);
+ 302 6404 : gst_info.num_tensors = num_tensors = gst_tensor_buffer_get_count (b);
303 :
304 : /* Set tensor data. The handle for tensors-info in data should be added. */
305 : status =
- 306 6328 : _ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data);
- 307 6328 : if (status != ML_ERROR_NONE) {
+ 306 6404 : _ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data);
+ 307 6404 : if (status != ML_ERROR_NONE) {
308 0 : _ml_loge (_ml_detail
309 : ("Failed to allocate memory for tensors data in sink callback, which is registered by ml_pipeline_sink_register ()."));
- 310 6328 : return;
+ 310 6404 : return;
311 : }
312 :
- 313 6328 : g_mutex_lock (&elem->lock);
+ 313 6404 : g_mutex_lock (&elem->lock);
314 :
- 315 6328 : _data->num_tensors = num_tensors;
- 316 13133 : for (i = 0; i < num_tensors; i++) {
- 317 6805 : mem[i] = gst_tensor_buffer_get_nth_memory (b, i);
- 318 6805 : if (!gst_memory_map (mem[i], &map[i], GST_MAP_READ)) {
+ 315 6404 : _data->num_tensors = num_tensors;
+ 316 13285 : for (i = 0; i < num_tensors; i++) {
+ 317 6881 : mem[i] = gst_tensor_buffer_get_nth_memory (b, i);
+ 318 6881 : if (!gst_memory_map (mem[i], &map[i], GST_MAP_READ)) {
319 0 : _ml_loge (_ml_detail
320 : ("Failed to map the output in sink '%s' callback, which is registered by ml_pipeline_sink_register ()",
321 : elem->name));
@@ -386,12 +386,12 @@
324 0 : goto error;
325 : }
326 :
- 327 6805 : _data->tensors[i].data = map[i].data;
- 328 6805 : _data->tensors[i].size = map[i].size;
+ 327 6881 : _data->tensors[i].data = map[i].data;
+ 328 6881 : _data->tensors[i].size = map[i].size;
329 : }
330 :
331 : /** @todo This assumes that padcap is static */
- 332 6328 : if (elem->sink == NULL) {
+ 332 6404 : if (elem->sink == NULL) {
333 28 : gboolean found = FALSE;
334 28 : gboolean flexible = FALSE;
335 :
@@ -423,7 +423,7 @@
361 : }
362 :
363 : /* Prepare output and set data. */
- 364 6328 : if (elem->is_flexible_tensor) {
+ 364 6404 : if (elem->is_flexible_tensor) {
365 : GstTensorMetaInfo meta;
366 : gsize hsize;
367 :
@@ -439,10 +439,10 @@
377 9 : _data->tensors[i].size = map[i].size - hsize;
378 : }
379 : } else {
- 380 6325 : gst_tensors_info_copy (&gst_info, &elem->tensors_info);
+ 380 6401 : gst_tensors_info_copy (&gst_info, &elem->tensors_info);
381 :
382 : /* Compare output info and buffer if gst-buffer is not flexible. */
- 383 6325 : if (gst_info.num_tensors != num_tensors) {
+ 383 6401 : if (gst_info.num_tensors != num_tensors) {
384 0 : _ml_loge (_ml_detail
385 : ("The sink event of [%s] cannot be handled because the number of tensors mismatches.",
386 : elem->name));
@@ -452,15 +452,15 @@
390 0 : goto error;
391 : }
392 :
- 393 13121 : for (i = 0; i < num_tensors; i++) {
- 394 6796 : size_t sz = gst_tensors_info_get_size (&gst_info, i);
+ 393 13273 : for (i = 0; i < num_tensors; i++) {
+ 394 6872 : size_t sz = gst_tensors_info_get_size (&gst_info, i);
395 :
396 : /* Not configured, yet. */
- 397 6796 : if (sz == 0)
+ 397 6872 : if (sz == 0)
398 0 : _ml_loge (_ml_detail
399 : ("The caps for sink(%s) is not configured.", elem->name));
400 :
- 401 6796 : if (sz != map[i].size) {
+ 401 6872 : if (sz != map[i].size) {
402 0 : _ml_loge (_ml_detail
403 : ("The sink event of [%s] cannot be handled because the tensor dimension mismatches.",
404 : elem->name));
@@ -473,35 +473,35 @@
411 : }
412 :
413 : /* Create new output info, data handle should be updated here. */
- 414 6328 : _ml_tensors_info_create_from_gst (&_data->info, &gst_info);
+ 414 6404 : _ml_tensors_info_create_from_gst (&_data->info, &gst_info);
415 :
416 : /* Iterate e->handles, pass the data to them */
- 417 12662 : for (l = elem->handles; l != NULL; l = l->next) {
+ 417 12814 : for (l = elem->handles; l != NULL; l = l->next) {
418 : ml_pipeline_sink_cb callback;
- 419 6334 : ml_pipeline_common_elem *sink = l->data;
- 420 6334 : if (sink->callback_info == NULL)
+ 419 6410 : ml_pipeline_common_elem *sink = l->data;
+ 420 6410 : if (sink->callback_info == NULL)
421 3 : continue;
422 :
- 423 6331 : callback = sink->callback_info->sink_cb;
- 424 6331 : if (callback)
- 425 6331 : callback (_data, _data->info, sink->callback_info->sink_pdata);
+ 423 6407 : callback = sink->callback_info->sink_cb;
+ 424 6407 : if (callback)
+ 425 6407 : callback (_data, _data->info, sink->callback_info->sink_pdata);
426 :
427 : /** @todo Measure time. Warn if it takes long. Kill if it takes too long. */
428 : }
429 :
- 430 6328 : error:
- 431 6328 : g_mutex_unlock (&elem->lock);
+ 430 6404 : error:
+ 431 6404 : g_mutex_unlock (&elem->lock);
432 :
- 433 13133 : for (i = 0; i < num_tensors; i++) {
- 434 6805 : gst_memory_unmap (mem[i], &map[i]);
- 435 6805 : gst_memory_unref (mem[i]);
+ 433 13285 : for (i = 0; i < num_tensors; i++) {
+ 434 6881 : gst_memory_unmap (mem[i], &map[i]);
+ 435 6881 : gst_memory_unref (mem[i]);
436 : }
437 :
- 438 6328 : _ml_tensors_data_destroy_internal (_data, FALSE);
- 439 6328 : _data = NULL;
+ 438 6404 : _ml_tensors_data_destroy_internal (_data, FALSE);
+ 439 6404 : _data = NULL;
440 :
- 441 6328 : gst_tensors_info_free (&gst_info);
- 442 6328 : return;
+ 441 6404 : gst_tensors_info_free (&gst_info);
+ 442 6404 : return;
443 : }
444 :
445 : /**
@@ -1559,29 +1559,29 @@
1497 : * @brief Parse tensors info of src element.
1498 : */
1499 : static int
- 1500 12562 : ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem)
+ 1500 12712 : ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem)
1501 : {
- 1502 12562 : GstCaps *caps = NULL;
- 1503 12562 : gboolean found = FALSE, flexible = FALSE;
+ 1502 12712 : GstCaps *caps = NULL;
+ 1503 12712 : gboolean found = FALSE, flexible = FALSE;
1504 :
- 1505 12562 : if (elem->src == NULL) {
+ 1505 12712 : if (elem->src == NULL) {
1506 41 : elem->src = gst_element_get_static_pad (elem->element, "src");
1507 : }
1508 :
- 1509 12562 : if (elem->src == NULL) {
+ 1509 12712 : if (elem->src == NULL) {
1510 0 : _ml_error_report
1511 : ("Failed to get the src pad of the element[%s]. The designated source element does not have available src pad? For the detail, please check the GStreamer log messages.",
1512 : elem->name);
- 1513 12562 : return ML_ERROR_STREAMS_PIPE;
+ 1513 12712 : return ML_ERROR_STREAMS_PIPE;
1514 : }
1515 :
1516 : /* If caps is given, use it. e.g. Use cap "image/png" when the pipeline is */
1517 : /* given as "appsrc caps=image/png ! pngdec ! ... " */
- 1518 12562 : caps = gst_pad_get_current_caps (elem->src);
- 1519 12562 : if (!caps)
- 1520 12498 : caps = gst_pad_get_allowed_caps (elem->src);
+ 1518 12712 : caps = gst_pad_get_current_caps (elem->src);
+ 1519 12712 : if (!caps)
+ 1520 12648 : caps = gst_pad_get_allowed_caps (elem->src);
1521 :
- 1522 12562 : if (!caps) {
+ 1522 12712 : if (!caps) {
1523 0 : _ml_logw
1524 : ("Cannot find caps. The pipeline is not yet negotiated for src element [%s].",
1525 : elem->name);
@@ -1590,10 +1590,10 @@
1528 0 : return ML_ERROR_TRY_AGAIN;
1529 : }
1530 :
- 1531 12562 : found = get_tensors_info_from_caps (caps, &elem->tensors_info, &flexible);
+ 1531 12712 : found = get_tensors_info_from_caps (caps, &elem->tensors_info, &flexible);
1532 :
- 1533 12562 : if (found) {
- 1534 12556 : elem->is_flexible_tensor = flexible;
+ 1533 12712 : if (found) {
+ 1534 12706 : elem->is_flexible_tensor = flexible;
1535 : } else {
1536 6 : if (gst_caps_is_fixed (caps)) {
1537 5 : GstStructure *st = gst_caps_get_structure (caps, 0);
@@ -1601,8 +1601,8 @@
1539 : }
1540 : }
1541 :
- 1542 12562 : gst_caps_unref (caps);
- 1543 12562 : return ML_ERROR_NONE;
+ 1542 12712 : gst_caps_unref (caps);
+ 1543 12712 : return ML_ERROR_NONE;
1544 : }
1545 :
1546 : /**
@@ -1698,7 +1698,7 @@
1636 : * @brief Push a data frame to a src (more info in nnstreamer.h)
1637 : */
1638 : int
- 1639 6302 : ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data,
+ 1639 6377 : ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data,
1640 : ml_pipeline_buf_policy_e policy)
1641 : {
1642 : GstBuffer *buffer;
@@ -1710,18 +1710,18 @@
1648 : ml_tensors_data_s *_data;
1649 : unsigned int i;
1650 :
- 1651 12604 : handle_init (src, h);
+ 1651 12754 : handle_init (src, h);
1652 :
- 1653 6302 : _data = (ml_tensors_data_s *) data;
- 1654 6302 : if (!_data) {
+ 1653 6377 : _data = (ml_tensors_data_s *) data;
+ 1654 6377 : if (!_data) {
1655 1 : _ml_error_report
1656 : ("The given parameter, data (ml_tensors_data_h), is NULL. It should be a valid ml_tensor_data_h instance, which is usually created by ml_tensors_data_create().");
1657 1 : ret = ML_ERROR_INVALID_PARAMETER;
1658 1 : goto unlock_return;
1659 : }
- 1660 6301 : G_LOCK_UNLESS_NOLOCK (*_data);
+ 1660 6376 : G_LOCK_UNLESS_NOLOCK (*_data);
1661 :
- 1662 6301 : if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
+ 1662 6376 : if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
1663 0 : _ml_error_report
1664 : ("The number of tensors of the given data (ml_tensors_data_h) is invalid. The number of tensors of data is %u. It should be between 1 and %u.",
1665 : _data->num_tensors, ML_TENSOR_SIZE_LIMIT);
@@ -1729,9 +1729,9 @@
1667 0 : goto dont_destroy_data;
1668 : }
1669 :
- 1670 6301 : ret = ml_pipeline_src_parse_tensors_info (elem);
+ 1670 6376 : ret = ml_pipeline_src_parse_tensors_info (elem);
1671 :
- 1672 6301 : if (ret != ML_ERROR_NONE) {
+ 1672 6376 : if (ret != ML_ERROR_NONE) {
1673 0 : if (ret == ML_ERROR_TRY_AGAIN)
1674 0 : _ml_error_report_continue
1675 : ("The pipeline is not ready to accept input streams. The input is ignored.");
@@ -1741,8 +1741,8 @@
1679 0 : goto dont_destroy_data;
1680 : }
1681 :
- 1682 6301 : if (!elem->is_media_stream && !elem->is_flexible_tensor) {
- 1683 6294 : if (elem->tensors_info.num_tensors != _data->num_tensors) {
+ 1682 6376 : if (!elem->is_media_stream && !elem->is_flexible_tensor) {
+ 1683 6369 : if (elem->tensors_info.num_tensors != _data->num_tensors) {
1684 0 : _ml_error_report
1685 : ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u",
1686 : elem->name, elem->tensors_info.num_tensors, _data->num_tensors);
@@ -1751,10 +1751,10 @@
1689 0 : goto dont_destroy_data;
1690 : }
1691 :
- 1692 12741 : for (i = 0; i < _data->num_tensors; i++) {
- 1693 6449 : size_t sz = gst_tensors_info_get_size (&elem->tensors_info, i);
+ 1692 12891 : for (i = 0; i < _data->num_tensors; i++) {
+ 1693 6524 : size_t sz = gst_tensors_info_get_size (&elem->tensors_info, i);
1694 :
- 1695 6449 : if (sz != _data->tensors[i].size) {
+ 1695 6524 : if (sz != _data->tensors[i].size) {
1696 2 : _ml_error_report
1697 : ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)",
1698 : i, _data->tensors[i].size, sz);
@@ -1766,21 +1766,21 @@
1704 : }
1705 :
1706 : /* Create buffer to be pushed from buf[] */
- 1707 6299 : buffer = gst_buffer_new ();
- 1708 6299 : _ml_tensors_info_copy_from_ml (&gst_info, _data->info);
+ 1707 6374 : buffer = gst_buffer_new ();
+ 1708 6374 : _ml_tensors_info_copy_from_ml (&gst_info, _data->info);
1709 :
- 1710 12759 : for (i = 0; i < _data->num_tensors; i++) {
+ 1710 12909 : for (i = 0; i < _data->num_tensors; i++) {
1711 : GstTensorInfo *_gst_tensor_info =
- 1712 6460 : gst_tensors_info_get_nth_info (&gst_info, i);
- 1713 6460 : mem_data = _data->tensors[i].data;
- 1714 6460 : mem_size = _data->tensors[i].size;
+ 1712 6535 : gst_tensors_info_get_nth_info (&gst_info, i);
+ 1713 6535 : mem_data = _data->tensors[i].data;
+ 1714 6535 : mem_size = _data->tensors[i].size;
1715 :
- 1716 6460 : mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
+ 1716 6535 : mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
1717 : mem_data, mem_size, 0, mem_size, mem_data,
1718 : (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) ? g_free : NULL);
1719 :
1720 : /* flex tensor, append header. */
- 1721 6460 : if (elem->is_flexible_tensor) {
+ 1721 6535 : if (elem->is_flexible_tensor) {
1722 : GstTensorMetaInfo meta;
1723 :
1724 9 : gst_tensor_info_convert_to_meta (_gst_tensor_info, &meta);
@@ -1789,76 +1789,76 @@
1727 9 : gst_memory_unref (tmp);
1728 : }
1729 :
- 1730 6460 : gst_tensor_buffer_append_memory (buffer, mem, _gst_tensor_info);
+ 1730 6535 : gst_tensor_buffer_append_memory (buffer, mem, _gst_tensor_info);
1731 : /** @todo Verify that gst_buffer_append lists tensors/gstmem in the correct order */
1732 : }
1733 :
- 1734 6299 : gst_tensors_info_free (&gst_info);
+ 1734 6374 : gst_tensors_info_free (&gst_info);
1735 :
1736 : /* Unlock if it's not auto-free. We do not know when it'll be freed. */
- 1737 6299 : if (policy != ML_PIPELINE_BUF_POLICY_AUTO_FREE)
+ 1737 6374 : if (policy != ML_PIPELINE_BUF_POLICY_AUTO_FREE)
1738 55 : G_UNLOCK_UNLESS_NOLOCK (*_data);
1739 :
1740 : /* Push the data! */
- 1741 6299 : gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer);
+ 1741 6374 : gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer);
1742 :
1743 : /* Free data ptr if buffer policy is auto-free */
- 1744 6299 : if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) {
- 1745 6244 : G_UNLOCK_UNLESS_NOLOCK (*_data);
- 1746 6244 : _ml_tensors_data_destroy_internal (_data, FALSE);
- 1747 6244 : _data = NULL;
+ 1744 6374 : if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) {
+ 1745 6319 : G_UNLOCK_UNLESS_NOLOCK (*_data);
+ 1746 6319 : _ml_tensors_data_destroy_internal (_data, FALSE);
+ 1747 6319 : _data = NULL;
1748 : }
1749 :
- 1750 6299 : if (gret == GST_FLOW_FLUSHING) {
+ 1750 6374 : if (gret == GST_FLOW_FLUSHING) {
1751 0 : _ml_logw
1752 : ("The pipeline is not in PAUSED/PLAYING. The input may be ignored.");
1753 0 : ret = ML_ERROR_TRY_AGAIN;
- 1754 6299 : } else if (gret == GST_FLOW_EOS) {
+ 1754 6374 : } else if (gret == GST_FLOW_EOS) {
1755 0 : _ml_logw ("THe pipeline is in EOS state. The input is ignored.");
1756 0 : ret = ML_ERROR_STREAMS_PIPE;
1757 : }
1758 :
- 1759 6299 : goto unlock_return;
+ 1759 6374 : goto unlock_return;
1760 :
1761 2 : dont_destroy_data:
1762 2 : G_UNLOCK_UNLESS_NOLOCK (*_data);
1763 :
- 1764 6302 : handle_exit (h);
+ 1764 6377 : handle_exit (h);
1765 : }
1766 :
1767 : /**
1768 : * @brief Internal function to fetch ml_pipeline_src_callbacks_s pointer
1769 : */
1770 : static ml_pipeline_src_callbacks_s *
- 1771 6214 : get_app_src_callback (ml_pipeline_common_elem * src_h, void **data)
+ 1771 6289 : get_app_src_callback (ml_pipeline_common_elem * src_h, void **data)
1772 : {
- 1773 6214 : ml_pipeline_src_callbacks_s *src_cb = NULL;
+ 1773 6289 : ml_pipeline_src_callbacks_s *src_cb = NULL;
1774 :
- 1775 6214 : if (src_h->callback_info) {
- 1776 6214 : src_cb = &src_h->callback_info->src_cb;
- 1777 6214 : *data = src_h->callback_info->src_pdata;
+ 1775 6289 : if (src_h->callback_info) {
+ 1776 6289 : src_cb = &src_h->callback_info->src_cb;
+ 1777 6289 : *data = src_h->callback_info->src_pdata;
1778 : }
1779 :
- 1780 6214 : return src_cb;
+ 1780 6289 : return src_cb;
1781 : }
1782 :
1783 : /**
1784 : * @brief Internal function for appsrc callback - need_data.
1785 : */
1786 : static void
- 1787 6214 : _pipe_src_cb_need_data (GstAppSrc * src, guint length, gpointer user_data)
+ 1787 6289 : _pipe_src_cb_need_data (GstAppSrc * src, guint length, gpointer user_data)
1788 : {
1789 : ml_pipeline_common_elem *src_h;
- 1790 6214 : ml_pipeline_src_callbacks_s *src_cb = NULL;
- 1791 6214 : void *pdata = NULL;
+ 1790 6289 : ml_pipeline_src_callbacks_s *src_cb = NULL;
+ 1791 6289 : void *pdata = NULL;
1792 :
- 1793 6214 : src_h = (ml_pipeline_common_elem *) user_data;
- 1794 6214 : if (!src_h)
+ 1793 6289 : src_h = (ml_pipeline_common_elem *) user_data;
+ 1794 6289 : if (!src_h)
1795 0 : return;
1796 :
- 1797 6214 : src_cb = get_app_src_callback (src_h, &pdata);
- 1798 6214 : if (src_cb && src_cb->need_data)
- 1799 6214 : src_cb->need_data (src_h, length, pdata);
+ 1797 6289 : src_cb = get_app_src_callback (src_h, &pdata);
+ 1798 6289 : if (src_cb && src_cb->need_data)
+ 1799 6289 : src_cb->need_data (src_h, length, pdata);
1800 : }
1801 :
1802 : /**
@@ -1944,27 +1944,27 @@
1882 : * @brief Gets a handle for the tensors metadata of given src node.
1883 : */
1884 : int
- 1885 6218 : ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h * info)
+ 1885 6293 : ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h * info)
1886 : {
- 1887 6218 : handle_init (src, h);
+ 1887 6293 : handle_init (src, h);
1888 :
- 1889 6218 : if (info == NULL) {
+ 1889 6293 : if (info == NULL) {
1890 0 : _ml_error_report
1891 : ("The parameter, info (ml_tensors_info_h *), is NULL. It should be a valid pointer to a ml_tensors_info_h instance, which is usually created by ml_tensors_info_create().");
1892 0 : ret = ML_ERROR_INVALID_PARAMETER;
1893 0 : goto unlock_return;
1894 : }
1895 :
- 1896 6218 : ret = ml_pipeline_src_parse_tensors_info (elem);
+ 1896 6293 : ret = ml_pipeline_src_parse_tensors_info (elem);
1897 :
- 1898 6218 : if (ret == ML_ERROR_NONE) {
- 1899 6218 : ret = _ml_tensors_info_create_from_gst (info, &elem->tensors_info);
+ 1898 6293 : if (ret == ML_ERROR_NONE) {
+ 1899 6293 : ret = _ml_tensors_info_create_from_gst (info, &elem->tensors_info);
1900 : } else {
1901 0 : _ml_error_report_continue
1902 : ("ml_pipeline_src_parse_tensors_info () has returned error; it cannot fetch input tensor info (metadata of input stream) for the given ml_pipeline_src_h handle (h). ml_pipeline_src_get_tensors_info () cannot continue.");
1903 : }
1904 :
- 1905 6218 : handle_exit (h);
+ 1905 6293 : handle_exit (h);
1906 : }
1907 :
1908 : /****************************************************
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html
index b30da22c..cdc55c20 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
@@ -28,21 +28,21 @@
-
+
|
-
+
-
+
-
+
|
-
+
-
+
|
@@ -63,23 +63,23 @@
- __destroy_notify |
+ ml_single_destroy_notify_cb |
0 |
- ml_single_destroy_notify_cb |
+ ml_single_set_inout_tensors_info |
0 |
- ml_single_set_inout_tensors_info |
+ __destroy_notify |
- 0 |
+ 1 |
@@ -226,7 +226,7 @@
__invoke |
- 88 |
+ 87 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html
index 4e5c70d6..9ee9c35b 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c - functions
@@ -28,21 +28,21 @@
-
+
|
-
+
-
+
-
+
|
-
+
-
+
|
@@ -65,14 +65,14 @@
__destroy_notify |
- 0 |
+ 1 |
__invoke |
- 88 |
+ 87 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html
index 8ccdb132..3e0a64c9 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-inference-single.c
@@ -28,21 +28,21 @@
-
+
|
-
+
-
+
-
+
|
-
+
-
+
|
@@ -397,29 +397,29 @@
335 : * @brief To call the framework to destroy the allocated output data
336 : */
337 : static inline void
- 338 0 : __destroy_notify (gpointer data_h, gpointer single_data)
+ 338 1 : __destroy_notify (gpointer data_h, gpointer single_data)
339 : {
340 : ml_single *single_h;
341 : ml_tensors_data_s *data;
342 :
- 343 0 : data = (ml_tensors_data_s *) data_h;
- 344 0 : single_h = (ml_single *) single_data;
+ 343 1 : data = (ml_tensors_data_s *) data_h;
+ 344 1 : single_h = (ml_single *) single_data;
345 :
- 346 0 : if (G_LIKELY (single_h->filter)) {
- 347 0 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
- 348 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors);
+ 346 1 : if (G_LIKELY (single_h->filter)) {
+ 347 1 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
+ 348 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors);
349 : }
350 : }
351 :
352 : /* reset callback function */
- 353 0 : data->destroy = NULL;
- 354 0 : }
+ 353 1 : data->destroy = NULL;
+ 354 1 : }
355 :
356 : /**
357 : * @brief Wrapper function for __destroy_notify
358 : */
359 : static int
- 360 0 : ml_single_destroy_notify_cb (void *handle, void *user_data)
+ 360 0 : ml_single_destroy_notify_cb (void *handle, void *user_data)
361 : {
362 0 : ml_tensors_data_h data = (ml_tensors_data_h) handle;
363 0 : ml_single_h single = (ml_single_h) user_data;
@@ -478,24 +478,24 @@
416 : * @brief Internal function to call subplugin's invoke
417 : */
418 : static inline int
- 419 88 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out,
+ 419 87 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out,
420 : gboolean alloc_output)
421 : {
422 : ml_tensors_data_s *in_data, *out_data;
- 423 88 : int status = ML_ERROR_NONE;
+ 423 87 : int status = ML_ERROR_NONE;
424 :
- 425 88 : in_data = (ml_tensors_data_s *) in;
- 426 88 : out_data = (ml_tensors_data_s *) out;
+ 425 87 : in_data = (ml_tensors_data_s *) in;
+ 426 87 : out_data = (ml_tensors_data_s *) out;
427 :
428 : /* Prevent error case when input or output is null in invoke thread. */
- 429 88 : if (!in_data || !out_data) {
+ 429 87 : if (!in_data || !out_data) {
430 0 : _ml_error_report ("Failed to invoke a model, invalid data handle.");
431 0 : return ML_ERROR_STREAMS_PIPE;
432 : }
433 :
434 : /* Invoke the thread. */
- 435 88 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
- 436 88 : out_data->tensors, alloc_output)) {
+ 435 87 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
+ 436 87 : out_data->tensors, alloc_output)) {
437 0 : const char *fw_name = _ml_get_nnfw_subplugin_name (single_h->nnfw);
438 0 : _ml_error_report
439 : ("Failed to invoke the tensors. The invoke callback of the tensor-filter subplugin '%s' has failed. Please contact the author of tensor-filter-%s (nnstreamer-%s) or review its source code. Note that this usually happens when the designated framework does not support the given model (e.g., trying to run tf-lite 2.6 model with tf-lite 1.13).",
@@ -503,7 +503,7 @@
441 0 : status = ML_ERROR_STREAMS_PIPE;
442 : }
443 :
- 444 88 : return status;
+ 444 87 : return status;
445 : }
446 :
447 : /**
@@ -563,35 +563,35 @@
501 102 : int status = ML_ERROR_NONE;
502 :
503 : /** wait for data */
- 504 126 : while (single_h->state != RUNNING) {
+ 504 125 : while (single_h->state != RUNNING) {
505 102 : g_cond_wait (&single_h->cond, &single_h->mutex);
506 100 : if (single_h->state == JOIN_REQUESTED)
- 507 76 : goto exit;
+ 507 77 : goto exit;
508 : }
509 :
- 510 24 : input = single_h->input;
- 511 24 : output = single_h->output;
+ 510 23 : input = single_h->input;
+ 511 23 : output = single_h->output;
512 : /* Set null to prevent double-free. */
- 513 24 : single_h->input = single_h->output = NULL;
+ 513 23 : single_h->input = single_h->output = NULL;
514 :
- 515 24 : single_h->invoking = TRUE;
- 516 24 : alloc_output = single_h->free_output;
- 517 24 : g_mutex_unlock (&single_h->mutex);
- 518 24 : status = __invoke (single_h, input, output, alloc_output);
- 519 24 : g_mutex_lock (&single_h->mutex);
+ 515 23 : single_h->invoking = TRUE;
+ 516 23 : alloc_output = single_h->free_output;
+ 517 23 : g_mutex_unlock (&single_h->mutex);
+ 518 23 : status = __invoke (single_h, input, output, alloc_output);
+ 519 23 : g_mutex_lock (&single_h->mutex);
520 : /* Clear input data after invoke is done. */
- 521 24 : ml_tensors_data_destroy (input);
- 522 24 : single_h->invoking = FALSE;
+ 521 23 : ml_tensors_data_destroy (input);
+ 522 23 : single_h->invoking = FALSE;
523 :
- 524 24 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) {
- 525 4 : if (alloc_output) {
- 526 4 : single_h->destroy_data_list =
- 527 4 : g_list_remove (single_h->destroy_data_list, output);
- 528 4 : ml_tensors_data_destroy (output);
+ 524 23 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) {
+ 525 3 : if (alloc_output) {
+ 526 3 : single_h->destroy_data_list =
+ 527 3 : g_list_remove (single_h->destroy_data_list, output);
+ 528 3 : ml_tensors_data_destroy (output);
529 : }
530 :
- 531 4 : if (single_h->state == JOIN_REQUESTED)
- 532 4 : goto exit;
+ 531 3 : if (single_h->state == JOIN_REQUESTED)
+ 532 3 : goto exit;
533 0 : goto wait_for_next;
534 : }
535 :
@@ -1298,10 +1298,10 @@
1236 80 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1237 :
1238 : /** Wait until invoke process is finished */
- 1239 1047 : while (invoking) {
- 1240 967 : _ml_logd ("Wait 1 ms until invoke is finished and close the handle.");
- 1241 967 : g_usleep (1000);
- 1242 967 : invoking = single_h->invoking;
+ 1239 892 : while (invoking) {
+ 1240 812 : _ml_logd ("Wait 1 ms until invoke is finished and close the handle.");
+ 1241 812 : g_usleep (1000);
+ 1242 812 : invoking = single_h->invoking;
1243 : /**
1244 : * single_h->invoking is the only protected value here and we are
1245 : * doing a read-only operation and do not need to project its value
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html
index 587301d8..c4d5edd1 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html
index 47c35994..266af3f7 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html
index 2ed98cce..a4faf849 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-agent-client.c
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html
index 15b48e1c..e0228808 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html
index 8ea2fcb0..8b539fad 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html
index 572d1e91..6b9a3891 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-extension.c
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html
index bb051fca..9fde5098 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
@@ -240,7 +240,7 @@
_mlrs_edge_event_cb |
- 55 |
+ 54 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html
index cbfb588c..96963ff6 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
@@ -170,7 +170,7 @@
_mlrs_edge_event_cb |
- 55 |
+ 54 |
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html
index b87d65bf..3e2fc8a8 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-offloading.c
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
@@ -613,17 +613,17 @@
551 : * @brief Edge event callback.
552 : */
553 : static int
- 554 55 : _mlrs_edge_event_cb (nns_edge_event_h event_h, void *user_data)
+ 554 54 : _mlrs_edge_event_cb (nns_edge_event_h event_h, void *user_data)
555 : {
- 556 55 : nns_edge_event_e event = NNS_EDGE_EVENT_UNKNOWN;
- 557 55 : nns_edge_data_h data_h = NULL;
- 558 55 : int ret = NNS_EDGE_ERROR_NONE;
+ 556 54 : nns_edge_event_e event = NNS_EDGE_EVENT_UNKNOWN;
+ 557 54 : nns_edge_data_h data_h = NULL;
+ 558 54 : int ret = NNS_EDGE_ERROR_NONE;
559 :
- 560 55 : ret = nns_edge_event_get_type (event_h, &event);
- 561 55 : if (NNS_EDGE_ERROR_NONE != ret)
- 562 55 : return ret;
+ 560 54 : ret = nns_edge_event_get_type (event_h, &event);
+ 561 54 : if (NNS_EDGE_ERROR_NONE != ret)
+ 562 54 : return ret;
563 :
- 564 55 : switch (event) {
+ 564 54 : switch (event) {
565 17 : case NNS_EDGE_EVENT_NEW_DATA_RECEIVED:
566 : {
567 17 : ret = nns_edge_event_parse_new_data (event_h, &data_h);
@@ -633,14 +633,14 @@
571 17 : ret = _mlrs_process_service_offloading (data_h, user_data);
572 17 : break;
573 : }
- 574 38 : default:
- 575 38 : break;
+ 574 37 : default:
+ 575 37 : break;
576 : }
577 :
- 578 55 : if (data_h)
+ 578 54 : if (data_h)
579 17 : nns_edge_data_destroy (data_h);
580 :
- 581 55 : return ret;
+ 581 54 : return ret;
582 : }
583 :
584 : /**
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html
index b1268128..d0657a1b 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html
index a04af5d9..d401d1c1 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html
index db451410..e5d4f4d1 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-query-client.c
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html
index 5e205530..ac412484 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html
index a959b98c..7299752a 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html
index 57a1371f..1239b9a6 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service-training-offloading.c
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html
index 0bb6d7cd..cf9fffb6 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func-c.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html
index a5f407e4..017e95af 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.func.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c - functions
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html
index 4be6785b..2993cac1 100644
--- a/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html
+++ b/testresult/ml-api/capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c.gcov.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491 - capi-machine-learning-inference-1.8.6/c/src/ml-api-service.c
@@ -28,7 +28,7 @@
-
+
|
@@ -37,7 +37,7 @@
-
+
|
diff --git a/testresult/ml-api/cmd_line b/testresult/ml-api/cmd_line
index 4269fee0..6b91f329 100644
--- a/testresult/ml-api/cmd_line
+++ b/testresult/ml-api/cmd_line
@@ -1 +1 @@
-genhtml -o result unittest-filtered.info -t 'ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0' --ignore-errors source -p /home/abuild/rpmbuild/BUILD
+genhtml -o result unittest-filtered.info -t 'ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491' --ignore-errors source -p /home/abuild/rpmbuild/BUILD
diff --git a/testresult/ml-api/coverage_badge.svg b/testresult/ml-api/coverage_badge.svg
index 6185bd92..39c62830 100644
--- a/testresult/ml-api/coverage_badge.svg
+++ b/testresult/ml-api/coverage_badge.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/testresult/ml-api/index-sort-f.html b/testresult/ml-api/index-sort-f.html
index abf178cb..c7dd1b71 100644
--- a/testresult/ml-api/index-sort-f.html
+++ b/testresult/ml-api/index-sort-f.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491
@@ -28,21 +28,21 @@
-
+
|
-
+
-
+
-
+
|
-
+
-
+
|
@@ -82,14 +82,14 @@
capi-machine-learning-inference-1.8.6/c/src |
-
+
|
- 83.1 % |
+ 83.3 % |
5160 |
- 4289 |
- 96.6 % |
+ 4296 |
+ 96.9 % |
294 |
- 284 |
+ 285 |
diff --git a/testresult/ml-api/index-sort-l.html b/testresult/ml-api/index-sort-l.html
index 9d263fd2..1acffe2e 100644
--- a/testresult/ml-api/index-sort-l.html
+++ b/testresult/ml-api/index-sort-l.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491
@@ -28,21 +28,21 @@
-
+
|
-
+
-
+
-
+
|
-
+
-
+
|
@@ -82,14 +82,14 @@
capi-machine-learning-inference-1.8.6/c/src |
-
+
|
- 83.1 % |
+ 83.3 % |
5160 |
- 4289 |
- 96.6 % |
+ 4296 |
+ 96.9 % |
294 |
- 284 |
+ 285 |
diff --git a/testresult/ml-api/index.html b/testresult/ml-api/index.html
index e418c49f..0f1820b3 100644
--- a/testresult/ml-api/index.html
+++ b/testresult/ml-api/index.html
@@ -4,7 +4,7 @@
- LCOV - ML API 1.8.6-0 nnstreamer/api#eba2cbed9702a44dbaf42fe53368e72c419751a0
+ LCOV - ML API 1.8.6-0 gichan-jang/api#48ca203121742beeae4e812d81565013cd585491
@@ -28,21 +28,21 @@
-
+
|
-
+
-
+
-
+
|
-
+
-
+
|
@@ -82,14 +82,14 @@
capi-machine-learning-inference-1.8.6/c/src |
-
+
|
- 83.1 % |
+ 83.3 % |
5160 |
- 4289 |
- 96.6 % |
+ 4296 |
+ 96.9 % |
294 |
- 284 |
+ 285 |