34 #include "../internal.h"
37 #include <openvino/c/openvino.h>
39 #include <c_api/ie_c_api.h>
/*
 * Append iterate_string to generated_string, separated by a single space.
 * generated_string may be NULL (first append).  The previously allocated
 * buffer is released with av_free() so repeated appends no longer leak
 * (the original expansion overwrote the pointer returned by av_asprintf()
 * without freeing the old one).  On av_asprintf() failure,
 * generated_string becomes NULL.
 * NOTE: the expansion deliberately ends with the ';' of while (0) so that
 * existing call sites written without a trailing semicolon keep compiling.
 */
#define APPEND_STRING(generated_string, iterate_string)                   \
    do {                                                                  \
        char *new_string_ = (generated_string) ?                          \
            av_asprintf("%s %s", generated_string, iterate_string) :      \
            av_asprintf("%s", iterate_string);                            \
        av_free(generated_string);                                        \
        generated_string = new_string_;                                   \
    } while (0);
/* Byte offset of field x within OVContext, for the AVOption table entries. */
#define OFFSET(x) offsetof(OVContext, x)
/* All of this backend's options are filter-level parameters. */
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
112 {
"scale",
"Add scale preprocess operation. Divide each element of input by specified value.",
OFFSET(
options.scale),
AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX,
FLAGS},
113 {
"mean",
"Add mean preprocess operation. Subtract specified value from each element of input.",
OFFSET(
options.mean),
AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX,
FLAGS},
120 static const struct {
125 { OK, 0,
"success" },
127 { NOT_IMPLEMENTED,
AVERROR(ENOSYS),
"not implemented" },
129 { PARAMETER_MISMATCH,
AVERROR(EINVAL),
"parameter mismatch" },
131 { OUT_OF_BOUNDS,
AVERROR(EOVERFLOW),
"out of bounds" },
133 { REQUEST_BUSY,
AVERROR(EBUSY),
"request busy" },
134 { RESULT_NOT_READY,
AVERROR(EBUSY),
"result not ready" },
135 { NOT_ALLOCATED,
AVERROR(ENODATA),
"not allocated" },
138 { INFER_CANCELLED,
AVERROR(ECANCELED),
"infer cancelled" },
139 { INVALID_C_PARAM,
AVERROR(EINVAL),
"invalid C parameter" },
141 { NOT_IMPLEMENT_C_METHOD,
AVERROR(ENOSYS),
"not implement C method" },
156 *
desc =
"unknown error";
188 return sizeof(
float);
190 return sizeof(uint8_t);
206 ov_tensor_t* tensor =
NULL;
207 ov_shape_t input_shape = {0};
208 ov_element_type_e precision;
212 precision_e precision;
213 ie_blob_buffer_t blob_buffer;
215 ie_blob_t *input_blob =
NULL;
225 ov_output_const_port_free(ov_model->
input_port);
250 dims = input_shape.dims;
254 ov_shape_free(&input_shape);
257 for (
int i = 0;
i < input_shape.rank;
i++)
268 status |= ie_blob_get_dims(input_blob, &dims);
269 status |= ie_blob_get_precision(input_blob, &precision);
271 ie_blob_free(&input_blob);
276 status = ie_blob_get_buffer(input_blob, &blob_buffer);
278 ie_blob_free(&input_blob);
282 for (
int i = 0;
i < input_shape.rank;
i++)
285 input.data = blob_buffer.buffer;
295 for (
int i = 0;
i <
ctx->options.batch_size; ++
i) {
305 ov_tensor_free(tensor);
306 status = ov_tensor_create(precision, input_shape, &tensor);
307 ov_shape_free(&input_shape);
347 ov_tensor_free(tensor);
349 ie_blob_free(&input_blob);
367 ov_tensor_t *output_tensor;
368 ov_shape_t output_shape = {0};
369 ov_element_type_e precision;
383 "Failed to get output tensor.");
390 "Failed to get output data.");
394 status = ov_tensor_get_shape(output_tensor, &output_shape);
399 dims = output_shape.dims;
409 outputs[
i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
410 outputs[
i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
411 outputs[
i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
416 ov_shape_free(&output_shape);
417 ov_tensor_free(output_tensor);
418 output_tensor =
NULL;
423 ie_blob_t *output_blob =
NULL;
424 ie_blob_buffer_t blob_buffer;
425 precision_e precision;
430 "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
435 status = ie_blob_get_buffer(output_blob, &blob_buffer);
437 ie_blob_free(&output_blob);
442 status |= ie_blob_get_dims(output_blob, &dims);
443 status |= ie_blob_get_precision(output_blob, &precision);
445 ie_blob_free(&output_blob);
449 output.data = blob_buffer.buffer;
451 for (
int i = 0;
i < 4;
i++)
494 for (
int output_i = 0; output_i < ov_model->
nb_outputs; output_i++)
514 ov_shape_free(&output_shape);
516 ov_tensor_free(output_tensor);
518 ie_blob_free(&output_blob);
537 if (!model || !*model)
540 ov_model = (*model)->
model;
570 ov_output_const_port_free(ov_model->
input_port);
576 ov_preprocess_prepostprocessor_free(ov_model->
preprocess);
582 ov_core_free(ov_model->
core);
587 ie_network_free(&ov_model->
network);
589 ie_core_free(&ov_model->
core);
605 ov_preprocess_input_tensor_info_t* input_tensor_info =
NULL;
606 ov_preprocess_output_tensor_info_t* output_tensor_info =
NULL;
607 ov_preprocess_input_model_info_t* input_model_info =
NULL;
608 ov_model_t *tmp_ov_model;
609 ov_layout_t* NHWC_layout =
NULL;
610 ov_layout_t* NCHW_layout =
NULL;
611 const char* NHWC_desc =
"NHWC";
612 const char* NCHW_desc =
"NCHW";
613 const char* device =
ctx->options.device_type;
616 ie_available_devices_t a_dev;
618 char *all_dev_names =
NULL;
624 if (
ctx->options.batch_size <= 0) {
625 ctx->options.batch_size = 1;
628 if (
ctx->options.batch_size > 1) {
630 "change batch_size to 1.\n");
631 ctx->options.batch_size = 1;
651 status = ov_preprocess_input_info_get_tensor_info(ov_model->
input_info, &input_tensor_info);
659 status = ov_layout_create(NHWC_desc, &NHWC_layout);
660 status |= ov_layout_create(NCHW_desc, &NCHW_layout);
667 status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
674 status = ov_preprocess_input_info_get_model_info(ov_model->
input_info, &input_model_info);
681 status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
683 status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
690 status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
705 nb_outputs = output_size;
708 for (
int i = 0;
i < nb_outputs;
i++) {
710 status = ov_preprocess_prepostprocessor_get_output_info_by_name(
713 status = ov_preprocess_prepostprocessor_get_output_info_by_index(
720 status |= ov_preprocess_output_info_get_tensor_info(ov_model->
output_info, &output_tensor_info);
727 status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
729 status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
731 status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
737 ov_preprocess_output_tensor_info_free(output_tensor_info);
738 output_tensor_info =
NULL;
739 ov_preprocess_output_info_free(ov_model->
output_info);
744 ov_preprocess_preprocess_steps_t* input_process_steps =
NULL;
745 status = ov_preprocess_input_info_get_preprocess_steps(ov_model->
input_info, &input_process_steps);
751 status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
752 status |= ov_preprocess_preprocess_steps_mean(input_process_steps,
ctx->options.mean);
753 status |= ov_preprocess_preprocess_steps_scale(input_process_steps,
ctx->options.scale);
756 ov_preprocess_preprocess_steps_free(input_process_steps);
757 input_process_steps =
NULL;
761 ov_preprocess_preprocess_steps_free(input_process_steps);
762 input_process_steps =
NULL;
764 ov_preprocess_input_tensor_info_free(input_tensor_info);
765 input_tensor_info =
NULL;
766 ov_preprocess_input_info_free(ov_model->
input_info);
775 ov_model_free(tmp_ov_model);
780 ov_model_free(tmp_ov_model);
790 for (
int i = 0;
i < nb_outputs;
i++) {
795 for (
int i = 0;
i < nb_outputs;
i++) {
798 status = ov_model_const_output_by_name(ov_model->
ov_model, output_names[
i],
822 ov_preprocess_input_model_info_free(input_model_info);
823 input_model_info =
NULL;
824 ov_layout_free(NCHW_layout);
825 ov_layout_free(NHWC_layout);
827 if (
ctx->options.batch_size > 1) {
828 input_shapes_t input_shapes;
829 status = ie_network_get_input_shapes(ov_model->
network, &input_shapes);
834 for (
int i = 0;
i < input_shapes.shape_num;
i++)
835 input_shapes.shapes[
i].shape.dims[0] =
ctx->options.batch_size;
836 status = ie_network_reshape(ov_model->
network, input_shapes);
837 ie_network_input_shapes_free(&input_shapes);
846 status = ie_network_set_input_layout(ov_model->
network, input_name, NHWC);
848 if (
status == NOT_FOUND) {
857 status = ie_network_set_output_layout(ov_model->
network, output_name, NHWC);
859 if (
status == NOT_FOUND) {
877 status = ie_network_set_input_precision(ov_model->
network, input_name, U8);
888 status = ie_core_get_available_devices(ov_model->
core, &a_dev);
894 for (
int i = 0;
i < a_dev.num_devices;
i++) {
895 APPEND_STRING(all_dev_names, a_dev.devices[
i])
898 ctx->options.device_type, all_dev_names);
904 if (
ctx->options.nireq <= 0) {
915 for (
int i = 0;
i <
ctx->options.nireq;
i++) {
972 if (output_tensor_info)
973 ov_preprocess_output_tensor_info_free(output_tensor_info);
975 ov_preprocess_output_info_free(ov_model->
output_info);
977 ov_layout_free(NCHW_layout);
979 ov_layout_free(NHWC_layout);
980 if (input_model_info)
981 ov_preprocess_input_model_info_free(input_model_info);
1011 task = lltask->
task;
1012 ov_model = task->
model;
1088 int input_resizable =
ctx->options.input_resizable;
1091 ov_shape_t input_shape = {0};
1092 ov_element_type_e precision;
1112 for (
int i = 0;
i < 4;
i++)
1113 input->dims[
i] = input_shape.dims[
i];
1114 if (input_resizable) {
1119 if (input_shape.dims[1] <= 3)
1125 ov_shape_free(&input_shape);
1128 char *model_input_name =
NULL;
1130 size_t model_input_count = 0;
1132 precision_e precision;
1133 status = ie_network_get_inputs_number(ov_model->
network, &model_input_count);
1138 for (
size_t i = 0;
i < model_input_count;
i++) {
1139 status = ie_network_get_input_name(ov_model->
network,
i, &model_input_name);
1144 if (strcmp(model_input_name, input_name) == 0) {
1145 ie_network_name_free(&model_input_name);
1146 status |= ie_network_get_input_dims(ov_model->
network, input_name, &dims);
1147 status |= ie_network_get_input_precision(ov_model->
network, input_name, &precision);
1153 for (
int i = 0;
i < 4;
i++)
1154 input->dims[
i] = input_shape.dims[
i];
1155 if (input_resizable) {
1160 if (input_shape.dims[1] <= 3)
1169 ie_network_name_free(&model_input_name);
1197 for (uint32_t
i = 0;
i <
header->nb_bboxes;
i++) {
1199 if (bbox->
x < 0 || bbox->
w < 0 || bbox->
x + bbox->
w >=
frame->width) {
1202 if (bbox->
y < 0 || bbox->
h < 0 || bbox->
y + bbox->
h >=
frame->height) {
1216 switch (func_type) {
1226 lltask->
task = task;
1265 lltask->
task = task;
1280 static int get_output_ov(
void *model,
const char *input_name,
int input_width,
int input_height,
1281 const char *output_name,
int *output_width,
int *output_height)
1284 ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
1286 ov_shape_t input_shape = {0};
1287 ov_partial_shape_t partial_shape;
1290 input_shapes_t input_shapes;
1299 .output_names = output_name ? &output_name :
NULL,
1311 if (
ctx->options.input_resizable) {
1312 status = ov_partial_shape_create(4, dims, &partial_shape);
1322 input_shape.dims[2] = input_height;
1323 input_shape.dims[3] = input_width;
1325 status = ov_shape_to_partial_shape(input_shape, &partial_shape);
1326 ov_shape_free(&input_shape);
1332 status = ov_model_reshape_single_input(ov_model->
ov_model, partial_shape);
1333 ov_partial_shape_free(&partial_shape);
1342 if (
ctx->options.input_resizable) {
1343 status = ie_network_get_input_shapes(ov_model->
network, &input_shapes);
1344 input_shapes.shapes->shape.dims[2] = input_height;
1345 input_shapes.shapes->shape.dims[3] = input_width;
1346 status |= ie_network_reshape(ov_model->
network, input_shapes);
1347 ie_network_input_shapes_free(&input_shapes);
1395 ov_core_t* core =
NULL;
1396 ov_model_t* ovmodel =
NULL;
1399 size_t node_count = 0;
1400 char *node_name =
NULL;
1414 model->
model = ov_model;
1415 ov_model->
model = model;
1416 ov_model->
ctx.
class = &dnn_openvino_class;
1427 status = ov_core_create(&core);
1431 ov_model->
core = core;
1433 status = ov_core_read_model(core, model_filename,
NULL, &ovmodel);
1436 status = ov_get_openvino_version(&ver);
1438 "Please check if the model version matches the runtime OpenVINO Version:\n",
1443 ov_version_free(&ver);
1451 status = ie_core_create(
"", &ov_model->
core);
1458 ver = ie_c_api_version();
1460 "Please check if the model version matches the runtime OpenVINO %s\n",
1461 model_filename, ver.api_version);
1462 ie_version_free(&ver);
1467 status = ie_network_get_inputs_number(ov_model->
network, &node_count);
1472 for (
size_t i = 0;
i < node_count;
i++) {
1473 status = ie_network_get_input_name(ov_model->
network,
i, &node_name);
1479 ie_network_name_free(&node_name);
1481 status = ie_network_get_outputs_number(ov_model->
network, &node_count);
1486 for (
size_t i = 0;
i < node_count;
i++) {
1487 status = ie_network_get_output_name(ov_model->
network,
i, &node_name);
1493 ie_network_name_free(&node_name);
1560 if (
ctx->options.async) {
1584 if (
ctx->options.batch_size > 1) {
1604 static int dnn_flush_ov(
const DNNModel *model)
1656 .execute_model = dnn_execute_model_ov,
1657 .get_result = dnn_get_result_ov,
1658 .flush = dnn_flush_ov,