36 #include <openvino/c/openvino.h>
38 #include <c_api/ie_c_api.h>
80 #define APPEND_STRING(generated_string, iterate_string) \
81 generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
82 av_asprintf("%s", iterate_string);
84 #define OFFSET(x) offsetof(OVOptions, x)
85 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
93 { "scale", "Add scale preprocess operation. Divide each element of input by specified value.", OFFSET(scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
94 { "mean", "Add mean preprocess operation. Subtract specified value from each element of input.", OFFSET(mean), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
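/* Note: these two AVOption entries expose the backend's "mean" and "scale"
 * preprocessing knobs; judging from the preprocess steps configured at lines
 * 729-731 below, a non-default value causes the input to be converted to F32
 * and computed as (pixel - mean) / scale before inference. */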
104 { OK,                     0,                  "success"                },
106 { NOT_IMPLEMENTED,        AVERROR(ENOSYS),    "not implemented"        },
108 { PARAMETER_MISMATCH,     AVERROR(EINVAL),    "parameter mismatch"     },
110 { OUT_OF_BOUNDS,          AVERROR(EOVERFLOW), "out of bounds"          },
112 { REQUEST_BUSY,           AVERROR(EBUSY),     "request busy"           },
113 { RESULT_NOT_READY,       AVERROR(EBUSY),     "result not ready"       },
114 { NOT_ALLOCATED,          AVERROR(ENODATA),   "not allocated"          },
117 { INFER_CANCELLED,        AVERROR(ECANCELED), "infer cancelled"        },
118 { INVALID_C_PARAM,        AVERROR(EINVAL),    "invalid C parameter"    },
120 { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS),    "not implement C method" },
135 *desc = "unknown error";
167 return sizeof(float);
169 return sizeof(uint8_t);
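/* The table at lines 104-120 translates OpenVINO / Inference Engine status
 * codes into FFmpeg AVERROR codes plus a short description; codes not listed
 * fall through to the "unknown error" branch at line 135. Lines 167/169 give
 * the per-element byte size for F32 and U8 tensor data respectively. */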
185 ov_tensor_t* tensor = NULL;
186 ov_shape_t input_shape = {0};
187 ov_element_type_e precision;
191 precision_e precision;
192 ie_blob_buffer_t blob_buffer;
194 ie_blob_t *input_blob = NULL;
204 ov_output_const_port_free(ov_model->input_port);
229 dims = input_shape.dims;
233 ov_shape_free(&input_shape);
236 for (int i = 0; i < input_shape.rank; i++)
247 status |= ie_blob_get_dims(input_blob, &dims);
248 status |= ie_blob_get_precision(input_blob, &precision);
250 ie_blob_free(&input_blob);
255 status = ie_blob_get_buffer(input_blob, &blob_buffer);
257 ie_blob_free(&input_blob);
261 for (int i = 0; i < input_shape.rank; i++)
264 input.data = blob_buffer.buffer;
274 for (int i = 0; i < ctx->ov_option.batch_size; ++i) {
284 ov_tensor_free(tensor);
285 status = ov_tensor_create(precision, input_shape, &tensor);
286 ov_shape_free(&input_shape);
326 ov_tensor_free(tensor);
328 ie_blob_free(&input_blob);
346 ov_tensor_t *output_tensor;
347 ov_shape_t output_shape = {0};
348 ov_element_type_e precision;
362 "Failed to get output tensor.");
369 "Failed to get output data.");
373 status = ov_tensor_get_shape(output_tensor, &output_shape);
378 dims = output_shape.dims;
388 outputs[i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
389 outputs[i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
390 outputs[i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
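/* dims[1]..dims[3] of each output are filled from the trailing dimensions of
 * the tensor shape; when the tensor rank is too small, the missing entries
 * default to 1. */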
395 ov_shape_free(&output_shape);
396 ov_tensor_free(output_tensor);
397 output_tensor = NULL;
402 ie_blob_t *output_blob = NULL;
403 ie_blob_buffer_t blob_buffer;
404 precision_e precision;
409 "output \"%s\" may not be correct, all output(s) are: \"%s\"\n",
414 status = ie_blob_get_buffer(output_blob, &blob_buffer);
416 ie_blob_free(&output_blob);
421 status |= ie_blob_get_dims(output_blob, &dims);
422 status |= ie_blob_get_precision(output_blob, &precision);
424 ie_blob_free(&output_blob);
428 output.data = blob_buffer.buffer;
430 for (int i = 0; i < 4; i++)
473 for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
493 ov_shape_free(&output_shape);
495 ov_tensor_free(output_tensor);
497 ie_blob_free(&output_blob);
516 if (!model || !*model)
519 ov_model = (OVModel *)(*model);
549 ov_output_const_port_free(ov_model->input_port);
555 ov_preprocess_prepostprocessor_free(ov_model->preprocess);
561 ov_core_free(ov_model->core);
566 ie_network_free(&ov_model->network);
568 ie_core_free(&ov_model->core);
583 ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
584 ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
585 ov_preprocess_input_model_info_t* input_model_info = NULL;
586 ov_model_t *tmp_ov_model;
587 ov_layout_t* NHWC_layout = NULL;
588 ov_layout_t* NCHW_layout = NULL;
589 const char* NHWC_desc = "NHWC";
590 const char* NCHW_desc = "NCHW";
591 const char* device = ctx->device ? ctx->device : "CPU";
594 ie_available_devices_t a_dev;
596 char *all_dev_names = NULL;
602 if (ctx->ov_option.batch_size <= 0) {
603 ctx->ov_option.batch_size = 1;
606 if (ctx->ov_option.batch_size > 1) {
608 "change batch_size to 1.\n");
609 ctx->ov_option.batch_size = 1;
629 status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
637 status = ov_layout_create(NHWC_desc, &NHWC_layout);
638 status |= ov_layout_create(NCHW_desc, &NCHW_layout);
645 status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
652 status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);
659 status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
661 status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
668 status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
683 nb_outputs = output_size;
686 for (int i = 0; i < nb_outputs; i++) {
688 status = ov_preprocess_prepostprocessor_get_output_info_by_name(
691 status = ov_preprocess_prepostprocessor_get_output_info_by_index(
698 status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
705 status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
706 else if (fabsf(ctx->ov_option.scale - 1) > 1e-6f || fabsf(ctx->ov_option.mean) > 1e-6f)
707 status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
709 status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
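/* Output element type selection: F32 is forced either by the preceding branch
 * at line 705 or when a non-identity scale/mean is configured (line 706);
 * otherwise the output stays U8 (line 709). */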
715 ov_preprocess_output_tensor_info_free(output_tensor_info);
716 output_tensor_info = NULL;
717 ov_preprocess_output_info_free(ov_model->output_info);
722 ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
723 status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);
729 status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
730 status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->ov_option.mean);
731 status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->ov_option.scale);
734 ov_preprocess_preprocess_steps_free(input_process_steps);
735 input_process_steps = NULL;
739 ov_preprocess_preprocess_steps_free(input_process_steps);
740 input_process_steps = NULL;
742 ov_preprocess_input_tensor_info_free(input_tensor_info);
743 input_tensor_info = NULL;
744 ov_preprocess_input_info_free(ov_model->input_info);
753 ov_model_free(tmp_ov_model);
758 ov_model_free(tmp_ov_model);
768 for (int i = 0; i < nb_outputs; i++) {
773 for (int i = 0; i < nb_outputs; i++) {
776 status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
800 ov_preprocess_input_model_info_free(input_model_info);
801 input_model_info = NULL;
802 ov_layout_free(NCHW_layout);
803 ov_layout_free(NHWC_layout);
805 if (ctx->ov_option.batch_size > 1) {
806 input_shapes_t input_shapes;
807 status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
812 for (int i = 0; i < input_shapes.shape_num; i++)
813 input_shapes.shapes[i].shape.dims[0] = ctx->ov_option.batch_size;
814 status = ie_network_reshape(ov_model->network, input_shapes);
815 ie_network_input_shapes_free(&input_shapes);
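/* Legacy (ie_*) batching path: dimension 0 of every input shape is rewritten
 * to the requested batch_size before the network is reshaped. */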
824 status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
826 if (status == NOT_FOUND) {
835 status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
837 if (status == NOT_FOUND) {
855 status = ie_network_set_input_precision(ov_model->network, input_name, U8);
866 status = ie_core_get_available_devices(ov_model->core, &a_dev);
872 for (int i = 0; i < a_dev.num_devices; i++) {
873 APPEND_STRING(all_dev_names, a_dev.devices[i])
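/* APPEND_STRING (lines 80-82) accumulates the available device names into one
 * space-separated string, which is then used together with ctx->device at
 * line 876, presumably in the log message for an unavailable device. */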
876 ctx->device, all_dev_names);
882 if (ctx->nireq <= 0) {
893 for (int i = 0; i < ctx->nireq; i++) {
950 if (output_tensor_info)
951 ov_preprocess_output_tensor_info_free(output_tensor_info);
953 ov_preprocess_output_info_free(ov_model->output_info);
955 ov_layout_free(NCHW_layout);
957 ov_layout_free(NHWC_layout);
958 if (input_model_info)
959 ov_preprocess_input_model_info_free(input_model_info);
989 ov_model = task->model;
1065 int input_resizable = ctx->ov_option.input_resizable;
1068 ov_shape_t input_shape = {0};
1069 ov_element_type_e precision;
1089 for (int i = 0; i < 4; i++)
1090 input->dims[i] = input_shape.dims[i];
1091 if (input_resizable) {
1096 if (input_shape.dims[1] <= 3)
1102 ov_shape_free(&input_shape);
1105 char *model_input_name = NULL;
1107 size_t model_input_count = 0;
1109 precision_e precision;
1110 status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
1115 for (size_t i = 0; i < model_input_count; i++) {
1116 status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
1121 if (strcmp(model_input_name, input_name) == 0) {
1122 ie_network_name_free(&model_input_name);
1123 status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
1124 status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
1130 for (int i = 0; i < 4; i++)
1131 input->dims[i] = input_shape.dims[i];
1132 if (input_resizable) {
1137 if (input_shape.dims[1] <= 3)
1146 ie_network_name_free(&model_input_name);
1174 for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1176 if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
1179 if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
1193 switch (func_type) {
1203 lltask->task = task;
1242 lltask->task = task;
1258 const char *output_name, int *output_width, int *output_height)
1261 ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
1263 ov_shape_t input_shape = {0};
1264 ov_partial_shape_t partial_shape;
1267 input_shapes_t input_shapes;
1276 .output_names = output_name ? &output_name : NULL,
1288 if (ctx->ov_option.input_resizable) {
1289 status = ov_partial_shape_create(4, dims, &partial_shape);
1299 input_shape.dims[2] = input_height;
1300 input_shape.dims[3] = input_width;
1302 status = ov_shape_to_partial_shape(input_shape, &partial_shape);
1303 ov_shape_free(&input_shape);
1309 status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
1310 ov_partial_shape_free(&partial_shape);
1319 if (ctx->ov_option.input_resizable) {
1320 status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
1321 input_shapes.shapes->shape.dims[2] = input_height;
1322 input_shapes.shapes->shape.dims[3] = input_width;
1323 status |= ie_network_reshape(ov_model->network, input_shapes);
1324 ie_network_input_shapes_free(&input_shapes);
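/* In both API paths, input_resizable rewrites the height/width entries
 * (indices 2 and 3 of an NCHW shape) to the caller-supplied size and reshapes
 * the model, so that output_width/output_height can be derived for that input
 * size. */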
1371 ov_core_t* core = NULL;
1372 ov_model_t* ovmodel = NULL;
1375 size_t node_count = 0;
1376 char *node_name = NULL;
1384 model = &ov_model->model;
1387 status = ov_core_create(&core);
1391 ov_model->core = core;
1393 status = ov_core_read_model(core, ctx->model_filename, NULL, &ovmodel);
1396 status = ov_get_openvino_version(&ver);
1398 "Please check if the model version matches the runtime OpenVINO Version:\n",
1399 ctx->model_filename);
1403 ov_version_free(&ver);
1411 status = ie_core_create("", &ov_model->core);
1418 ver = ie_c_api_version();
1420 "Please check if the model version matches the runtime OpenVINO %s\n",
1421 ctx->model_filename, ver.api_version);
1422 ie_version_free(&ver);
1427 status = ie_network_get_inputs_number(ov_model->network, &node_count);
1432 for (size_t i = 0; i < node_count; i++) {
1433 status = ie_network_get_input_name(ov_model->network, i, &node_name);
1439 ie_network_name_free(&node_name);
1441 status = ie_network_get_outputs_number(ov_model->network, &node_count);
1446 for (size_t i = 0; i < node_count; i++) {
1447 status = ie_network_get_output_name(ov_model->network, i, &node_name);
1453 ie_network_name_free(&node_name);
1543 if (ctx->ov_option.batch_size > 1) {
1563 static int dnn_flush_ov(const DNNModel *model)
1616 .load_model = dnn_load_model_ov,
1617 .execute_model = dnn_execute_model_ov,
1618 .get_result = dnn_get_result_ov,
1619 .flush = dnn_flush_ov,