dnn_backend_openvino.c
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
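/*
 * This backend is built against one of two OpenVINO C APIs, selected at
 * configure time: with HAVE_OPENVINO2 it uses the OpenVINO 2.0 C API
 * (<openvino/c/openvino.h>), otherwise the legacy Inference Engine C API
 * (<c_api/ie_c_api.h>).
 *
 * Illustrative invocation only (the model file and filter parameters below
 * are placeholders, assuming a build configured with --enable-libopenvino):
 *   ffmpeg -i in.mp4 -vf dnn_processing=dnn_backend=openvino:model=model.xml:input=x:output=y out.mp4
 */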
25 
26 #include "dnn_io_proc.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/detection_bbox.h"
33 #include "../internal.h"
34 #include "safe_queue.h"
35 #if HAVE_OPENVINO2
36 #include <openvino/c/openvino.h>
37 #else
38 #include <c_api/ie_c_api.h>
39 #endif
40 #include "dnn_backend_common.h"
41 
42 typedef struct OVOptions{
43  char *device_type;
44  int nireq;
45  uint8_t async;
46  int batch_size;
47  int input_resizable;
48  DNNLayout layout;
49  float scale;
50  float mean;
51 } OVOptions;
52 
53 typedef struct OVContext {
54  const AVClass *class;
55  OVOptions options;
56 } OVContext;
57 
58 typedef struct OVModel{
59  OVContext ctx;
60  DNNModel *model;
61 #if HAVE_OPENVINO2
62  ov_core_t *core;
63  ov_model_t *ov_model;
64  ov_compiled_model_t *compiled_model;
65  ov_output_const_port_t* input_port;
66  ov_preprocess_input_info_t* input_info;
67  ov_output_const_port_t* output_port;
68  ov_preprocess_output_info_t* output_info;
69  ov_preprocess_prepostprocessor_t* preprocess;
70 #else
71  ie_core_t *core;
72  ie_network_t *network;
73  ie_executable_network_t *exe_network;
74  const char *all_input_names;
75  const char *all_output_names;
76 #endif
77  SafeQueue *request_queue; // holds OVRequestItem
78  Queue *task_queue; // holds TaskItem
79  Queue *lltask_queue; // holds LastLevelTaskItem
80 } OVModel;
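/*
 * Relationship between the three queues above: task_queue holds one TaskItem
 * per filter-level execution, lltask_queue holds the LastLevelTaskItems that
 * are actually sent to inference (a classification task fans out into one
 * lltask per detected bounding box), and request_queue recycles idle
 * OVRequestItems so the underlying infer requests are reused.
 */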
81 
82 // one request for one call to openvino
83 typedef struct OVRequestItem {
84  LastLevelTaskItem **lltasks;
85  uint32_t lltask_count;
86 #if HAVE_OPENVINO2
87  ov_infer_request_t *infer_request;
88  ov_callback_t callback;
89 #else
90  ie_complete_call_back_t callback;
91  ie_infer_request_t *infer_request;
92 #endif
93 } OVRequestItem;
94 
95 #define APPEND_STRING(generated_string, iterate_string) \
96  generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
97  av_asprintf("%s", iterate_string);
98 
99 #define OFFSET(x) offsetof(OVContext, x)
100 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
101 static const AVOption dnn_openvino_options[] = {
102  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
103  DNN_BACKEND_COMMON_OPTIONS
104  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
105  { "input_resizable", "whether the input can be resized", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
106  { "layout", "input layout of model", OFFSET(options.layout), AV_OPT_TYPE_INT, { .i64 = DL_NONE}, DL_NONE, DL_NHWC, FLAGS, "layout" },
107  { "none", "none", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NONE }, 0, 0, FLAGS, "layout"},
108  { "nchw", "nchw", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NCHW }, 0, 0, FLAGS, "layout"},
109  { "nhwc", "nhwc", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NHWC }, 0, 0, FLAGS, "layout"},
110  { "scale", "Add scale preprocess operation. Divide each element of input by specified value.", OFFSET(options.scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
111  { "mean", "Add mean preprocess operation. Subtract specified value from each element of input.", OFFSET(options.mean), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
112  { NULL }
113 };
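/*
 * The options above are parsed from the backend options string handed in by
 * the DNN filters, using '=' and '&' as separators (see av_opt_set_from_string()
 * in dnn_load_model_ov() below). Illustrative example only:
 *   device=CPU&nireq=2&batch_size=1&input_resizable=0&layout=nhwc
 */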
114 
115 AVFILTER_DEFINE_CLASS(dnn_openvino);
116 
117 #if HAVE_OPENVINO2
118 static const struct {
119  ov_status_e status;
120  int av_err;
121  const char *desc;
122 } ov2_errors[] = {
123  { OK, 0, "success" },
124  { GENERAL_ERROR, AVERROR_EXTERNAL, "general error" },
125  { NOT_IMPLEMENTED, AVERROR(ENOSYS), "not implemented" },
126  { NETWORK_NOT_LOADED, AVERROR_EXTERNAL, "network not loaded" },
127  { PARAMETER_MISMATCH, AVERROR(EINVAL), "parameter mismatch" },
128  { NOT_FOUND, AVERROR_EXTERNAL, "not found" },
129  { OUT_OF_BOUNDS, AVERROR(EOVERFLOW), "out of bounds" },
130  { UNEXPECTED, AVERROR_EXTERNAL, "unexpected" },
131  { REQUEST_BUSY, AVERROR(EBUSY), "request busy" },
132  { RESULT_NOT_READY, AVERROR(EBUSY), "result not ready" },
133  { NOT_ALLOCATED, AVERROR(ENODATA), "not allocated" },
134  { INFER_NOT_STARTED, AVERROR_EXTERNAL, "infer not started" },
135  { NETWORK_NOT_READ, AVERROR_EXTERNAL, "network not read" },
136  { INFER_CANCELLED, AVERROR(ECANCELED), "infer cancelled" },
137  { INVALID_C_PARAM, AVERROR(EINVAL), "invalid C parameter" },
138  { UNKNOWN_C_ERROR, AVERROR_UNKNOWN, "unknown C error" },
139  { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS), "not implement C method" },
140  { UNKNOW_EXCEPTION, AVERROR_UNKNOWN, "unknown exception" },
141 };
142 
143 static int ov2_map_error(ov_status_e status, const char **desc)
144 {
145  int i;
146  for (i = 0; i < FF_ARRAY_ELEMS(ov2_errors); i++) {
147  if (ov2_errors[i].status == status) {
148  if (desc)
149  *desc = ov2_errors[i].desc;
150  return ov2_errors[i].av_err;
151  }
152  }
153  if (desc)
154  *desc = "unknown error";
155  return AVERROR_UNKNOWN;
156 }
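/*
 * Minimal usage sketch for ov2_map_error(), mirroring the error handling
 * pattern used throughout this file ("ctx" stands for any logging context):
 *
 *     const char *desc;
 *     int err = ov2_map_error(status, &desc);
 *     av_log(ctx, AV_LOG_ERROR, "OpenVINO error: %s\n", desc);
 *     return err;
 */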
157 #endif
158 
159 #if HAVE_OPENVINO2
160 static DNNDataType precision_to_datatype(ov_element_type_e precision)
161 #else
162 static DNNDataType precision_to_datatype(precision_e precision)
163 #endif
164 {
165  switch (precision)
166  {
167 #if HAVE_OPENVINO2
168  case F32:
169 #else
170  case FP32:
171 #endif
172  return DNN_FLOAT;
173  case U8:
174  return DNN_UINT8;
175  default:
176  av_assert0(!"not supported yet.");
177  return DNN_FLOAT;
178  }
179 }
180 
181 static int get_datatype_size(DNNDataType dt)
182 {
183  switch (dt)
184  {
185  case DNN_FLOAT:
186  return sizeof(float);
187  case DNN_UINT8:
188  return sizeof(uint8_t);
189  default:
190  av_assert0(!"not supported yet.");
191  return 1;
192  }
193 }
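/*
 * get_datatype_size() is used to size host buffers, e.g. in
 * fill_model_input_ov(): a 1x224x224x3 NHWC input (the shape is only an
 * example) needs 224 * 224 * 3 * 1 bytes as DNN_UINT8, and
 * 224 * 224 * 3 * 4 bytes as DNN_FLOAT.
 */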
194 
195 static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
196 {
197  DNNData input;
198  LastLevelTaskItem *lltask;
199  TaskItem *task;
200  OVContext *ctx = &ov_model->ctx;
201 #if HAVE_OPENVINO2
202  int64_t* dims;
203  ov_status_e status;
204  ov_tensor_t* tensor = NULL;
205  ov_shape_t input_shape = {0};
206  ov_element_type_e precision;
207  void *input_data_ptr = NULL;
208 #else
209  dimensions_t dims;
210  precision_e precision;
211  ie_blob_buffer_t blob_buffer;
212  IEStatusCode status;
213  ie_blob_t *input_blob = NULL;
214 #endif
215 
216  memset(&input, 0, sizeof(input));
217  lltask = ff_queue_peek_front(ov_model->lltask_queue);
218  av_assert0(lltask);
219  task = lltask->task;
220 
221 #if HAVE_OPENVINO2
222  if (!ov_model_is_dynamic(ov_model->ov_model)) {
223  if (ov_model->input_port) {
224  ov_output_const_port_free(ov_model->input_port);
225  ov_model->input_port = NULL;
226  }
227  status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
228  if (status != OK) {
229  av_log(ctx, AV_LOG_ERROR, "Failed to get input port.\n");
230  return ov2_map_error(status, NULL);
231  }
232  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
233  if (status != OK) {
234  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
235  return ov2_map_error(status, NULL);
236  }
237  dims = input_shape.dims;
238  status = ov_port_get_element_type(ov_model->input_port, &precision);
239  if (status != OK) {
240  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
241  ov_shape_free(&input_shape);
242  return ov2_map_error(status, NULL);
243  }
244  } else {
245  avpriv_report_missing_feature(ctx, "Do not support dynamic model.");
246  return AVERROR(ENOSYS);
247  }
248  input.height = dims[1];
249  input.width = dims[2];
250  input.channels = dims[3];
251  input.dt = precision_to_datatype(precision);
252  input.data = av_malloc(input.height * input.width * input.channels * get_datatype_size(input.dt));
253  if (!input.data) {
254  ov_shape_free(&input_shape);
255  return AVERROR(ENOMEM);
256  }
257  input_data_ptr = input.data;
258 #else
259  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
260  if (status != OK) {
261  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
262  return DNN_GENERIC_ERROR;
263  }
264 
265  status |= ie_blob_get_dims(input_blob, &dims);
266  status |= ie_blob_get_precision(input_blob, &precision);
267  if (status != OK) {
268  ie_blob_free(&input_blob);
269  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
270  return DNN_GENERIC_ERROR;
271  }
272 
273  status = ie_blob_get_buffer(input_blob, &blob_buffer);
274  if (status != OK) {
275  ie_blob_free(&input_blob);
276  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
277  return DNN_GENERIC_ERROR;
278  }
279  input.height = dims.dims[2];
280  input.width = dims.dims[3];
281  input.channels = dims.dims[1];
282  input.data = blob_buffer.buffer;
283  input.dt = precision_to_datatype(precision);
284 #endif
285  // All models in the OpenVINO Open Model Zoo use BGR as input;
286  // make this an option when necessary.
287  input.order = DCO_BGR;
288  // We use preprocess_steps to scale input data, so disable scale and mean here.
289  input.scale = 1;
290  input.mean = 0;
291 
292  for (int i = 0; i < ctx->options.batch_size; ++i) {
293  lltask = ff_queue_pop_front(ov_model->lltask_queue);
294  if (!lltask) {
295  break;
296  }
297  request->lltasks[i] = lltask;
298  request->lltask_count = i + 1;
299  task = lltask->task;
300  switch (ov_model->model->func_type) {
301  case DFT_PROCESS_FRAME:
302  if (task->do_ioproc) {
303  if (ov_model->model->frame_pre_proc != NULL) {
304  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
305  } else {
306  ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
307  }
308  }
309  break;
310  case DFT_ANALYTICS_DETECT:
311  ff_frame_to_dnn_detect(task->in_frame, &input, ctx);
312  break;
313  case DFT_ANALYTICS_CLASSIFY:
314  ff_frame_to_dnn_classify(task->in_frame, &input, request->lltasks[i]->bbox_index, ctx);
315  break;
316  default:
317  av_assert0(!"should not reach here");
318  break;
319  }
320 #if HAVE_OPENVINO2
321  status = ov_tensor_create_from_host_ptr(precision, input_shape, input.data, &tensor);
322  ov_shape_free(&input_shape);
323  if (status != OK) {
324  av_log(ctx, AV_LOG_ERROR, "Failed to create tensor from host ptr.\n");
325  return ov2_map_error(status, NULL);
326  }
327  status = ov_infer_request_set_input_tensor(request->infer_request, tensor);
328  if (status != OK) {
329  av_log(ctx, AV_LOG_ERROR, "Failed to set an input tensor for the model.\n");
330  return ov2_map_error(status, NULL);
331  }
332 #endif
333  input.data = (uint8_t *)input.data
334  + input.width * input.height * input.channels * get_datatype_size(input.dt);
335  }
336 #if HAVE_OPENVINO2
337  av_freep(&input_data_ptr);
338 #else
339  ie_blob_free(&input_blob);
340 #endif
341 
342  return 0;
343 }
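/*
 * fill_model_input_ov() above: peek the next lltask to learn the input name,
 * query the input port for shape and element type, allocate a host buffer
 * (OpenVINO 2 path), run the per-function pre-processing for up to batch_size
 * lltasks, and wrap the buffer in an ov_tensor_t attached to the infer
 * request; on the legacy path the data is written directly into the input
 * blob's buffer instead.
 */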
344 
345 static void infer_completion_callback(void *args)
346 {
347  OVRequestItem *request = args;
348  LastLevelTaskItem *lltask = request->lltasks[0];
349  TaskItem *task = lltask->task;
350  OVModel *ov_model = task->model;
351  SafeQueue *requestq = ov_model->request_queue;
352  DNNData output;
353  OVContext *ctx = &ov_model->ctx;
354 #if HAVE_OPENVINO2
355  size_t* dims;
356  ov_status_e status;
357  ov_tensor_t *output_tensor;
358  ov_shape_t output_shape = {0};
359  ov_element_type_e precision;
360 
361  memset(&output, 0, sizeof(output));
362  status = ov_infer_request_get_output_tensor_by_index(request->infer_request, 0, &output_tensor);
363  if (status != OK) {
364  av_log(ctx, AV_LOG_ERROR,
365  "Failed to get output tensor.");
366  return;
367  }
368 
369  status = ov_tensor_data(output_tensor, &output.data);
370  if (status != OK) {
371  av_log(ctx, AV_LOG_ERROR,
372  "Failed to get output data.");
373  return;
374  }
375 
376  status = ov_tensor_get_shape(output_tensor, &output_shape);
377  if (status != OK) {
378  av_log(ctx, AV_LOG_ERROR, "Failed to get output port shape.\n");
379  return;
380  }
381  dims = output_shape.dims;
382 
383  status = ov_port_get_element_type(ov_model->output_port, &precision);
384  if (status != OK) {
385  av_log(ctx, AV_LOG_ERROR, "Failed to get output port data type.\n");
386  ov_shape_free(&output_shape);
387  return;
388  }
389  output.channels = dims[1];
390  output.height = dims[2];
391  output.width = dims[3];
392  av_assert0(request->lltask_count <= dims[0]);
393  ov_shape_free(&output_shape);
394 #else
395  IEStatusCode status;
396  dimensions_t dims;
397  ie_blob_t *output_blob = NULL;
398  ie_blob_buffer_t blob_buffer;
399  precision_e precision;
400  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
401  if (status != OK) {
402  av_log(ctx, AV_LOG_ERROR,
403  "output \"%s\" may not be correct, all output(s) are: \"%s\"\n",
404  task->output_names[0], ov_model->all_output_names);
405  return;
406  }
407 
408  status = ie_blob_get_buffer(output_blob, &blob_buffer);
409  if (status != OK) {
410  ie_blob_free(&output_blob);
411  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
412  return;
413  }
414 
415  status |= ie_blob_get_dims(output_blob, &dims);
416  status |= ie_blob_get_precision(output_blob, &precision);
417  if (status != OK) {
418  ie_blob_free(&output_blob);
419  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
420  return;
421  }
422  output.data = blob_buffer.buffer;
423  output.channels = dims.dims[1];
424  output.height = dims.dims[2];
425  output.width = dims.dims[3];
426  av_assert0(request->lltask_count <= dims.dims[0]);
427 #endif
428  output.dt = precision_to_datatype(precision);
429  output.layout = ctx->options.layout;
430  output.scale = ctx->options.scale;
431  output.mean = ctx->options.mean;
432 
433  av_assert0(request->lltask_count >= 1);
434  for (int i = 0; i < request->lltask_count; ++i) {
435  task = request->lltasks[i]->task;
436 
437  switch (ov_model->model->func_type) {
438  case DFT_PROCESS_FRAME:
439  if (task->do_ioproc) {
440  if (ov_model->model->frame_post_proc != NULL) {
441  ov_model->model->frame_post_proc(task->out_frame, &output, ov_model->model->filter_ctx);
442  } else {
444  }
445  } else {
446  task->out_frame->width = output.width;
447  task->out_frame->height = output.height;
448  }
449  break;
450  case DFT_ANALYTICS_DETECT:
451  if (!ov_model->model->detect_post_proc) {
452  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
453  return;
454  }
455  ov_model->model->detect_post_proc(task->in_frame, &output, 1, ov_model->model->filter_ctx);
456  break;
457  case DFT_ANALYTICS_CLASSIFY:
458  if (!ov_model->model->classify_post_proc) {
459  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
460  return;
461  }
462  ov_model->model->classify_post_proc(task->in_frame, &output, request->lltasks[i]->bbox_index, ov_model->model->filter_ctx);
463  break;
464  default:
465  av_assert0(!"should not reach here");
466  break;
467  }
468 
469  task->inference_done++;
470  av_freep(&request->lltasks[i]);
471  output.data = (uint8_t *)output.data
472  + output.width * output.height * output.channels * get_datatype_size(output.dt);
473  }
474 #if !HAVE_OPENVINO2
475  ie_blob_free(&output_blob);
476 #endif
477  request->lltask_count = 0;
478  if (ff_safe_queue_push_back(requestq, request) < 0) {
479 #if HAVE_OPENVINO2
480  ov_infer_request_free(request->infer_request);
481 #else
482  ie_infer_request_free(&request->infer_request);
483 #endif
484  av_freep(&request);
485  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
486  return;
487  }
488 }
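/*
 * infer_completion_callback() runs once per completed request (and is also
 * called directly after a synchronous infer): it maps the output tensor/blob,
 * walks the batched lltasks, dispatches frame/detect/classify post-processing,
 * and finally recycles the OVRequestItem back into request_queue.
 */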
489 
490 static void dnn_free_model_ov(DNNModel **model)
491 {
492  OVModel *ov_model;
493 
494  if (!model || !*model)
495  return;
496 
497  ov_model = (*model)->model;
498  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
499  OVRequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
500  if (item && item->infer_request) {
501 #if HAVE_OPENVINO2
502  ov_infer_request_free(item->infer_request);
503 #else
504  ie_infer_request_free(&item->infer_request);
505 #endif
506  }
507  av_freep(&item->lltasks);
508  av_freep(&item);
509  }
510  ff_safe_queue_destroy(ov_model->request_queue);
511 
512  while (ff_queue_size(ov_model->lltask_queue) != 0) {
513  LastLevelTaskItem *item = ff_queue_pop_front(ov_model->lltask_queue);
514  av_freep(&item);
515  }
516  ff_queue_destroy(ov_model->lltask_queue);
517 
518  while (ff_queue_size(ov_model->task_queue) != 0) {
519  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
520  av_frame_free(&item->in_frame);
521  av_frame_free(&item->out_frame);
522  av_freep(&item);
523  }
524  ff_queue_destroy(ov_model->task_queue);
525 #if HAVE_OPENVINO2
526  if (ov_model->input_port)
527  ov_output_const_port_free(ov_model->input_port);
528  if (ov_model->output_port)
529  ov_output_const_port_free(ov_model->output_port);
530  if (ov_model->preprocess)
531  ov_preprocess_prepostprocessor_free(ov_model->preprocess);
532  if (ov_model->compiled_model)
533  ov_compiled_model_free(ov_model->compiled_model);
534  if (ov_model->ov_model)
535  ov_model_free(ov_model->ov_model);
536  if (ov_model->core)
537  ov_core_free(ov_model->core);
538 #else
539  if (ov_model->exe_network)
540  ie_exec_network_free(&ov_model->exe_network);
541  if (ov_model->network)
542  ie_network_free(&ov_model->network);
543  if (ov_model->core)
544  ie_core_free(&ov_model->core);
545  av_free(ov_model->all_output_names);
546  av_free(ov_model->all_input_names);
547 #endif
548  av_opt_free(&ov_model->ctx);
549  av_freep(&ov_model);
550  av_freep(model);
551 }
552 
553 
554 static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
555 {
556  int ret = 0;
557  OVContext *ctx = &ov_model->ctx;
558 #if HAVE_OPENVINO2
559  ov_status_e status;
560  ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
561  ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
562  ov_preprocess_input_model_info_t* input_model_info = NULL;
563  ov_model_t *tmp_ov_model;
564  ov_layout_t* NHWC_layout = NULL;
565  ov_layout_t* NCHW_layout = NULL;
566  const char* NHWC_desc = "NHWC";
567  const char* NCHW_desc = "NCHW";
568  const char* device = ctx->options.device_type;
569 #else
570  IEStatusCode status;
571  ie_available_devices_t a_dev;
572  ie_config_t config = {NULL, NULL, NULL};
573  char *all_dev_names = NULL;
574 #endif
575  // We scale pixels by default when doing frame processing.
576  if (fabsf(ctx->options.scale) < 1e-6f)
577  ctx->options.scale = ov_model->model->func_type == DFT_PROCESS_FRAME ? 255 : 1;
578  // batch size
579  if (ctx->options.batch_size <= 0) {
580  ctx->options.batch_size = 1;
581  }
582 #if HAVE_OPENVINO2
583  if (ctx->options.batch_size > 1) {
584  avpriv_report_missing_feature(ctx, "Do not support batch_size > 1 for now, "
585  "change batch_size to 1.\n");
586  ctx->options.batch_size = 1;
587  }
588 
589  status = ov_preprocess_prepostprocessor_create(ov_model->ov_model, &ov_model->preprocess);
590  if (status != OK) {
591  av_log(ctx, AV_LOG_ERROR, "Failed to create preprocess for ov_model.\n");
592  ret = ov2_map_error(status, NULL);
593  goto err;
594  }
595 
596  status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
597  status |= ov_preprocess_prepostprocessor_get_output_info_by_name(ov_model->preprocess, output_name, &ov_model->output_info);
598  if (status != OK) {
599  av_log(ctx, AV_LOG_ERROR, "Failed to get input/output info from preprocess.\n");
600  ret = ov2_map_error(status, NULL);
601  goto err;
602  }
603 
604  status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
605  status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
606  if (status != OK) {
607  av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input/output.\n");
608  ret = ov2_map_error(status, NULL);
609  goto err;
610  }
611 
612  //set input layout
613  status = ov_layout_create(NHWC_desc, &NHWC_layout);
614  status |= ov_layout_create(NCHW_desc, &NCHW_layout);
615  if (status != OK) {
616  av_log(ctx, AV_LOG_ERROR, "Failed to create layout for input.\n");
617  ret = ov2_map_error(status, NULL);
618  goto err;
619  }
620 
621  status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
622  if (status != OK) {
623  av_log(ctx, AV_LOG_ERROR, "Failed to set input tensor layout\n");
624  ret = ov2_map_error(status, NULL);
625  goto err;
626  }
627 
628  status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);
629  if (status != OK) {
630  av_log(ctx, AV_LOG_ERROR, "Failed to get input model info\n");
631  ret = ov2_map_error(status, NULL);
632  goto err;
633  }
634  if (ctx->options.layout == DL_NCHW)
635  status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
636  else if (ctx->options.layout == DL_NHWC)
637  status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
638  if (status != OK) {
639  av_log(ctx, AV_LOG_ERROR, "Failed to set input model layout\n");
640  ret = ov2_map_error(status, NULL);
641  goto err;
642  }
643 
644  status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
645  if (ov_model->model->func_type != DFT_PROCESS_FRAME)
646  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
647  else if (fabsf(ctx->options.scale - 1) > 1e-6f || fabsf(ctx->options.mean) > 1e-6f)
648  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
649  else
650  status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
651  if (status != OK) {
652  av_log(ctx, AV_LOG_ERROR, "Failed to set input/output element type\n");
653  ret = ov2_map_error(status, NULL);
654  goto err;
655  }
656  // set preprocess steps.
657  if (fabsf(ctx->options.scale - 1) > 1e-6f || fabsf(ctx->options.mean) > 1e-6f) {
658  ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
659  status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);
660  if (status != OK) {
661  av_log(ctx, AV_LOG_ERROR, "Failed to get preprocess steps\n");
662  ret = ov2_map_error(status, NULL);
663  goto err;
664  }
665  status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
666  status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->options.mean);
667  status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->options.scale);
668  if (status != OK) {
669  av_log(ctx, AV_LOG_ERROR, "Failed to set preprocess steps\n");
670  ret = ov2_map_error(status, NULL);
671  goto err;
672  }
673  ov_preprocess_preprocess_steps_free(input_process_steps);
674  }
675 
676  //update model
677  if(ov_model->ov_model)
678  tmp_ov_model = ov_model->ov_model;
679  status = ov_preprocess_prepostprocessor_build(ov_model->preprocess, &ov_model->ov_model);
680  if (status != OK) {
681  av_log(ctx, AV_LOG_ERROR, "Failed to update OV model\n");
682  ret = ov2_map_error(status, NULL);
683  goto err;
684  }
685  ov_model_free(tmp_ov_model);
686 
687  //update output_port
688  if (ov_model->output_port) {
689  ov_output_const_port_free(ov_model->output_port);
690  ov_model->output_port = NULL;
691  }
692  status = ov_model_const_output_by_name(ov_model->ov_model, output_name, &ov_model->output_port);
693  if (status != OK) {
694  av_log(ctx, AV_LOG_ERROR, "Failed to get output port.\n");
695  goto err;
696  }
697  //compile network
698  status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
699  if (status != OK) {
700  ret = ov2_map_error(status, NULL);
701  goto err;
702  }
703  ov_preprocess_input_model_info_free(input_model_info);
704  ov_layout_free(NCHW_layout);
705  ov_layout_free(NHWC_layout);
706 #else
707  if (ctx->options.batch_size > 1) {
708  input_shapes_t input_shapes;
709  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
710  if (status != OK) {
711  ret = DNN_GENERIC_ERROR;
712  goto err;
713  }
714  for (int i = 0; i < input_shapes.shape_num; i++)
715  input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
716  status = ie_network_reshape(ov_model->network, input_shapes);
717  ie_network_input_shapes_free(&input_shapes);
718  if (status != OK) {
719  ret = DNN_GENERIC_ERROR;
720  goto err;
721  }
722  }
723 
724  // The order of dims in OpenVINO is fixed and is always NCHW for 4-D data,
725  // while we pass NHWC data from FFmpeg to OpenVINO.
726  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
727  if (status != OK) {
728  if (status == NOT_FOUND) {
729  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set input layout as NHWC, "\
730  "all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
731  } else{
732  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
733  }
734  ret = DNN_GENERIC_ERROR;
735  goto err;
736  }
737  status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
738  if (status != OK) {
739  if (status == NOT_FOUND) {
740  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set output layout as NHWC, "\
741  "all output(s) are: \"%s\"\n", output_name, ov_model->all_output_names);
742  } else{
743  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
744  }
745  ret = DNN_GENERIC_ERROR;
746  goto err;
747  }
748 
749  // All models in the OpenVINO Open Model Zoo use BGR with range [0.0f, 255.0f] as input;
750  // we don't have an AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
751  // ask OpenVINO to do the conversion internally.
752  // The currently supported SR model (frame processing) is generated from a TensorFlow model,
753  // and its input is the Y channel as float with range [0.0f, 1.0f], so do not set it for this case.
754  // TODO: we need a final, clear and general solution with all backends/formats considered.
755  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
756  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
757  if (status != OK) {
758  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
759  ret = DNN_GENERIC_ERROR;
760  goto err;
761  }
762  }
763 
764  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
765  if (status != OK) {
766  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
767  status = ie_core_get_available_devices(ov_model->core, &a_dev);
768  if (status != OK) {
769  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
770  ret = DNN_GENERIC_ERROR;
771  goto err;
772  }
773  for (int i = 0; i < a_dev.num_devices; i++) {
774  APPEND_STRING(all_dev_names, a_dev.devices[i])
775  }
776  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
777  ctx->options.device_type, all_dev_names);
778  ret = AVERROR(ENODEV);
779  goto err;
780  }
781 #endif
782  // create infer_requests for async execution
783  if (ctx->options.nireq <= 0) {
784  // the default value is a rough estimation
785  ctx->options.nireq = av_cpu_count() / 2 + 1;
786  }
787 
788  ov_model->request_queue = ff_safe_queue_create();
789  if (!ov_model->request_queue) {
790  ret = AVERROR(ENOMEM);
791  goto err;
792  }
793 
794  for (int i = 0; i < ctx->options.nireq; i++) {
795  OVRequestItem *item = av_mallocz(sizeof(*item));
796  if (!item) {
797  ret = AVERROR(ENOMEM);
798  goto err;
799  }
800 
801 #if HAVE_OPENVINO2
802  item->callback.callback_func = infer_completion_callback;
803 #else
804  item->callback.completeCallBackFunc = infer_completion_callback;
805 #endif
806  item->callback.args = item;
807  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
808  av_freep(&item);
809  ret = AVERROR(ENOMEM);
810  goto err;
811  }
812 
813 #if HAVE_OPENVINO2
814  status = ov_compiled_model_create_infer_request(ov_model->compiled_model, &item->infer_request);
815  if (status != OK) {
816  av_log(ctx, AV_LOG_ERROR, "Failed to create an inference request object.\n");
817  goto err;
818  }
819 #else
820  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
821  if (status != OK) {
822  ret = DNN_GENERIC_ERROR;
823  goto err;
824  }
825 #endif
826 
827  item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
828  if (!item->lltasks) {
829  ret = AVERROR(ENOMEM);
830  goto err;
831  }
832  item->lltask_count = 0;
833  }
834 
835  ov_model->task_queue = ff_queue_create();
836  if (!ov_model->task_queue) {
837  ret = AVERROR(ENOMEM);
838  goto err;
839  }
840 
841  ov_model->lltask_queue = ff_queue_create();
842  if (!ov_model->lltask_queue) {
843  ret = AVERROR(ENOMEM);
844  goto err;
845  }
846 
847  return 0;
848 
849 err:
850 #if HAVE_OPENVINO2
851  if (NCHW_layout)
852  ov_layout_free(NCHW_layout);
853  if (NHWC_layout)
854  ov_layout_free(NHWC_layout);
855  if (input_model_info)
856  ov_preprocess_input_model_info_free(input_model_info);
857 #endif
858  dnn_free_model_ov(&ov_model->model);
859  return ret;
860 }
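/*
 * init_model_ov() summary for the OpenVINO 2 path: a prepostprocessor is
 * created that declares the input tensor as NHWC U8 and the output element
 * type as U8 or F32 (depending on the function type and on mean/scale),
 * optionally tells OpenVINO the model's own layout via the "layout" option,
 * adds convert/mean/scale preprocess steps when mean or scale is set, rebuilds
 * the model, compiles it for the requested device, and pre-allocates nireq
 * infer requests, each with a batch_size-sized lltasks array.
 */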
861 
862 static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
863 {
864 #if HAVE_OPENVINO2
865  ov_status_e status;
866 #else
867  IEStatusCode status;
868 #endif
869  LastLevelTaskItem *lltask;
870  int ret = 0;
871  TaskItem *task;
872  OVContext *ctx;
873  OVModel *ov_model;
874 
875  if (ff_queue_size(inferenceq) == 0) {
876 #if HAVE_OPENVINO2
877  ov_infer_request_free(request->infer_request);
878 #else
879  ie_infer_request_free(&request->infer_request);
880 #endif
881  av_freep(&request);
882  return 0;
883  }
884 
885  lltask = ff_queue_peek_front(inferenceq);
886  task = lltask->task;
887  ov_model = task->model;
888  ctx = &ov_model->ctx;
889 
890  ret = fill_model_input_ov(ov_model, request);
891  if (ret != 0) {
892  goto err;
893  }
894 
895 #if HAVE_OPENVINO2
896  if (task->async) {
897  status = ov_infer_request_set_callback(request->infer_request, &request->callback);
898  if (status != OK) {
899  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
900  ret = ov2_map_error(status, NULL);
901  goto err;
902  }
903 
904  status = ov_infer_request_start_async(request->infer_request);
905  if (status != OK) {
906  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
907  ret = ov2_map_error(status, NULL);
908  goto err;
909  }
910  return 0;
911  } else {
912  status = ov_infer_request_infer(request->infer_request);
913  if (status != OK) {
914  av_log(NULL, AV_LOG_ERROR, "Failed to start synchronous model inference for OV2\n");
915  ret = ov2_map_error(status, NULL);
916  goto err;
917  }
918  infer_completion_callback(request);
919  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
920  }
921 #else
922  if (task->async) {
923  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
924  if (status != OK) {
925  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
926  ret = DNN_GENERIC_ERROR;
927  goto err;
928  }
929  status = ie_infer_request_infer_async(request->infer_request);
930  if (status != OK) {
931  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
932  ret = DNN_GENERIC_ERROR;
933  goto err;
934  }
935  return 0;
936  } else {
937  status = ie_infer_request_infer(request->infer_request);
938  if (status != OK) {
939  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
940  ret = DNN_GENERIC_ERROR;
941  goto err;
942  }
943  infer_completion_callback(request);
944  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
945  }
946 #endif
947 err:
948  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
949 #if HAVE_OPENVINO2
950  ov_infer_request_free(request->infer_request);
951 #else
952  ie_infer_request_free(&request->infer_request);
953 #endif
954  av_freep(&request);
955  }
956  return ret;
957 }
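/*
 * execute_model_ov(): in async mode the request's callback is registered and
 * inference is started asynchronously, with the request recycled inside
 * infer_completion_callback(); in sync mode the request is run inline and the
 * callback is invoked manually, so success is judged by
 * task->inference_done == task->inference_todo.
 */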
958 
959 static int get_input_ov(void *model, DNNData *input, const char *input_name)
960 {
961  OVModel *ov_model = model;
962  OVContext *ctx = &ov_model->ctx;
963  int input_resizable = ctx->options.input_resizable;
964 
965 #if HAVE_OPENVINO2
966  ov_shape_t input_shape = {0};
967  ov_element_type_e precision;
968  int64_t* dims;
969  ov_status_e status;
970  if (!ov_model_is_dynamic(ov_model->ov_model)) {
971  status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
972  if (status != OK) {
973  av_log(ctx, AV_LOG_ERROR, "Failed to get input port.\n");
974  return ov2_map_error(status, NULL);
975  }
976 
977  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
978  if (status != OK) {
979  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
980  return ov2_map_error(status, NULL);
981  }
982  dims = input_shape.dims;
983 
984  status = ov_port_get_element_type(ov_model->input_port, &precision);
985  if (status != OK) {
986  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
987  return ov2_map_error(status, NULL);
988  }
989  } else {
990  avpriv_report_missing_feature(ctx, "Do not support dynamic model now.");
991  return AVERROR(ENOSYS);
992  }
993 
994  input->channels = dims[1];
995  input->height = input_resizable ? -1 : dims[2];
996  input->width = input_resizable ? -1 : dims[3];
997  input->dt = precision_to_datatype(precision);
998 
999  return 0;
1000 #else
1001  char *model_input_name = NULL;
1002  IEStatusCode status;
1003  size_t model_input_count = 0;
1004  dimensions_t dims;
1005  precision_e precision;
1006  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
1007  if (status != OK) {
1008  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1009  return DNN_GENERIC_ERROR;
1010  }
1011  for (size_t i = 0; i < model_input_count; i++) {
1012  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
1013  if (status != OK) {
1014  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1015  return DNN_GENERIC_ERROR;
1016  }
1017  if (strcmp(model_input_name, input_name) == 0) {
1018  ie_network_name_free(&model_input_name);
1019  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
1020  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
1021  if (status != OK) {
1022  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
1023  return DNN_GENERIC_ERROR;
1024  }
1025 
1026  input->channels = dims.dims[1];
1027  input->height = input_resizable ? -1 : dims.dims[2];
1028  input->width = input_resizable ? -1 : dims.dims[3];
1029  input->dt = precision_to_datatype(precision);
1030  return 0;
1031  }
1032 
1033  ie_network_name_free(&model_input_name);
1034  }
1035 
1036  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
1037  return AVERROR(EINVAL);
1038 #endif
1039 }
1040 
1041 static int contain_valid_detection_bbox(AVFrame *frame)
1042 {
1043  AVFrameSideData *sd;
1044  const AVDetectionBBoxHeader *header;
1045  const AVDetectionBBox *bbox;
1046 
1047  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
1048  if (!sd) { // this frame has nothing detected
1049  return 0;
1050  }
1051 
1052  if (!sd->size) {
1053  return 0;
1054  }
1055 
1056  header = (const AVDetectionBBoxHeader *)sd->data;
1057  if (!header->nb_bboxes) {
1058  return 0;
1059  }
1060 
1061  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1062  bbox = av_get_detection_bbox(header, i);
1063  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
1064  return 0;
1065  }
1066  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
1067  return 0;
1068  }
1069 
1070  if (bbox->classify_count == AV_NUM_DETECTION_BBOX_CLASSIFY) {
1071  return 0;
1072  }
1073  }
1074 
1075  return 1;
1076 }
1077 
1078 static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
1079 {
1080  switch (func_type) {
1081  case DFT_PROCESS_FRAME:
1082  case DFT_ANALYTICS_DETECT:
1083  {
1084  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
1085  if (!lltask) {
1086  return AVERROR(ENOMEM);
1087  }
1088  task->inference_todo = 1;
1089  task->inference_done = 0;
1090  lltask->task = task;
1091  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1092  av_freep(&lltask);
1093  return AVERROR(ENOMEM);
1094  }
1095  return 0;
1096  }
1097  case DFT_ANALYTICS_CLASSIFY:
1098  {
1099  const AVDetectionBBoxHeader *header = NULL;
1100  AVFrame *frame = task->in_frame;
1101  AVFrameSideData *sd;
1102  DNNExecClassificationParams *params = (DNNExecClassificationParams *)exec_params;
1103 
1104  task->inference_todo = 0;
1105  task->inference_done = 0;
1106 
1107  if (!contain_valid_detection_bbox(frame)) {
1108  return 0;
1109  }
1110 
1111  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
1112  header = (const AVDetectionBBoxHeader *)sd->data;
1113 
1114  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1115  LastLevelTaskItem *lltask;
1116  const AVDetectionBBox *bbox = av_get_detection_bbox(header, i);
1117 
1118  if (params->target) {
1119  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
1120  continue;
1121  }
1122  }
1123 
1124  lltask = av_malloc(sizeof(*lltask));
1125  if (!lltask) {
1126  return AVERROR(ENOMEM);
1127  }
1128  task->inference_todo++;
1129  lltask->task = task;
1130  lltask->bbox_index = i;
1131  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1132  av_freep(&lltask);
1133  return AVERROR(ENOMEM);
1134  }
1135  }
1136  return 0;
1137  }
1138  default:
1139  av_assert0(!"should not reach here");
1140  return AVERROR(EINVAL);
1141  }
1142 }
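/*
 * extract_lltask_from_task(): frame processing and detection produce exactly
 * one lltask per task; classification produces one lltask per detection bbox
 * carried in the frame's side data, optionally filtered by the "target" label
 * from DNNExecClassificationParams.
 */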
1143 
1144 static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
1145  const char *output_name, int *output_width, int *output_height)
1146 {
1147 #if HAVE_OPENVINO2
1148  ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
1149  ov_status_e status;
1150  ov_shape_t input_shape = {0};
1151  ov_partial_shape_t partial_shape;
1152 #else
1153  IEStatusCode status;
1154  input_shapes_t input_shapes;
1155 #endif
1156  int ret;
1157  OVModel *ov_model = model;
1158  OVContext *ctx = &ov_model->ctx;
1159  TaskItem task;
1160  OVRequestItem *request;
1161  DNNExecBaseParams exec_params = {
1162  .input_name = input_name,
1163  .output_names = &output_name,
1164  .nb_output = 1,
1165  .in_frame = NULL,
1166  .out_frame = NULL,
1167  };
1168 
1169  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
1170  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
1171  return AVERROR(EINVAL);
1172  }
1173 
1174 #if HAVE_OPENVINO2
1175  if (ctx->options.input_resizable) {
1176  if (!ov_model_is_dynamic(ov_model->ov_model)) {
1177  status = ov_partial_shape_create(4, dims, &partial_shape);
1178  if (status != OK) {
1179  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape.\n");
1180  return ov2_map_error(status, NULL);
1181  }
1182  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1183  input_shape.dims[2] = input_height;
1184  input_shape.dims[3] = input_width;
1185  if (status != OK) {
1186  av_log(ctx, AV_LOG_ERROR, "Failed to create shape for model input resize.\n");
1187  return ov2_map_error(status, NULL);
1188  }
1189 
1190  status = ov_shape_to_partial_shape(input_shape, &partial_shape);
1191  if (status != OK) {
1192  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape for model input resize.\n");
1193  return ov2_map_error(status, NULL);
1194  }
1195 
1196  status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
1197  if (status != OK) {
1198  av_log(ctx, AV_LOG_ERROR, "Failed to resize model input.\n");
1199  return ov2_map_error(status, NULL);
1200  }
1201  } else {
1202  avpriv_report_missing_feature(ctx, "Do not support dynamic model.");
1203  return AVERROR(ENOTSUP);
1204  }
1205  }
1206 
1207  status = ov_model_const_output_by_name(ov_model->ov_model, output_name, &ov_model->output_port);
1208  if (status != OK) {
1209  av_log(ctx, AV_LOG_ERROR, "Failed to get output port.\n");
1210  return ov2_map_error(status, NULL);
1211  }
1212  if (!ov_model->compiled_model) {
1213 #else
1214  if (ctx->options.input_resizable) {
1215  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
1216  input_shapes.shapes->shape.dims[2] = input_height;
1217  input_shapes.shapes->shape.dims[3] = input_width;
1218  status |= ie_network_reshape(ov_model->network, input_shapes);
1219  ie_network_input_shapes_free(&input_shapes);
1220  if (status != OK) {
1221  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
1222  return DNN_GENERIC_ERROR;
1223  }
1224  }
1225  if (!ov_model->exe_network) {
1226 #endif
1227  ret = init_model_ov(ov_model, input_name, output_name);
1228  if (ret != 0) {
1229  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
1230  return ret;
1231  }
1232  }
1233 
1234  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
1235  if (ret != 0) {
1236  goto err;
1237  }
1238 
1239  ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
1240  if (ret != 0) {
1241  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1242  goto err;
1243  }
1244 
1245  request = ff_safe_queue_pop_front(ov_model->request_queue);
1246  if (!request) {
1247  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1248  ret = AVERROR(EINVAL);
1249  goto err;
1250  }
1251 
1252  ret = execute_model_ov(request, ov_model->lltask_queue);
1253  *output_width = task.out_frame->width;
1254  *output_height = task.out_frame->height;
1255 err:
1256  av_frame_free(&task.out_frame);
1257  av_frame_free(&task.in_frame);
1258  return ret;
1259 }
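/*
 * get_output_ov() determines the output resolution by actually running one
 * inference: ff_dnn_fill_gettingoutput_task() builds a dummy task around an
 * input_height x input_width frame, execute_model_ov() runs it synchronously,
 * and the resulting out_frame dimensions are reported back to the caller.
 */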
1260 
1261 static DNNModel *dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
1262 {
1263  DNNModel *model = NULL;
1264  OVModel *ov_model = NULL;
1265  OVContext *ctx = NULL;
1266 #if HAVE_OPENVINO2
1267  ov_core_t* core = NULL;
1268  ov_model_t* ovmodel = NULL;
1269  ov_status_e status;
1270 #else
1271  size_t node_count = 0;
1272  char *node_name = NULL;
1273  IEStatusCode status;
1274 #endif
1275 
1276  model = av_mallocz(sizeof(DNNModel));
1277  if (!model){
1278  return NULL;
1279  }
1280 
1281  ov_model = av_mallocz(sizeof(OVModel));
1282  if (!ov_model) {
1283  av_freep(&model);
1284  return NULL;
1285  }
1286  model->model = ov_model;
1287  ov_model->model = model;
1288  ov_model->ctx.class = &dnn_openvino_class;
1289  ctx = &ov_model->ctx;
1290 
1291  //parse options
1292  av_opt_set_defaults(ctx);
1293  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
1294  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
1295  goto err;
1296  }
1297 
1298 #if HAVE_OPENVINO2
1299  status = ov_core_create(&core);
1300  if (status != OK) {
1301  goto err;
1302  }
1303  ov_model->core = core;
1304 
1305  status = ov_core_read_model(core, model_filename, NULL, &ovmodel);
1306  if (status != OK) {
1307  ov_version_t ver;
1308  status = ov_get_openvino_version(&ver);
1309  av_log(NULL, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1310  "Please check if the model version matches the runtime OpenVINO Version:\n",
1311  model_filename);
1312  if (status == OK) {
1313  av_log(NULL, AV_LOG_ERROR, "BuildNumber: %s\n", ver.buildNumber);
1314  }
1315  ov_version_free(&ver);
1316  goto err;
1317  }
1318  ov_model->ov_model = ovmodel;
1319 #else
1320  ov_model->all_input_names = NULL;
1321  ov_model->all_output_names = NULL;
1322 
1323  status = ie_core_create("", &ov_model->core);
1324  if (status != OK)
1325  goto err;
1326 
1327  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
1328  if (status != OK) {
1329  ie_version_t ver;
1330  ver = ie_c_api_version();
1331  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1332  "Please check if the model version matches the runtime OpenVINO %s\n",
1333  model_filename, ver.api_version);
1334  ie_version_free(&ver);
1335  goto err;
1336  }
1337 
1338  //get all the input and output names
1339  status = ie_network_get_inputs_number(ov_model->network, &node_count);
1340  if (status != OK) {
1341  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1342  goto err;
1343  }
1344  for (size_t i = 0; i < node_count; i++) {
1345  status = ie_network_get_input_name(ov_model->network, i, &node_name);
1346  if (status != OK) {
1347  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1348  goto err;
1349  }
1350  APPEND_STRING(ov_model->all_input_names, node_name)
1351  ie_network_name_free(&node_name);
1352  }
1353  status = ie_network_get_outputs_number(ov_model->network, &node_count);
1354  if (status != OK) {
1355  av_log(ctx, AV_LOG_ERROR, "Failed to get output count\n");
1356  goto err;
1357  }
1358  for (size_t i = 0; i < node_count; i++) {
1359  status = ie_network_get_output_name(ov_model->network, i, &node_name);
1360  if (status != OK) {
1361  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d output's name\n", (int)i);
1362  goto err;
1363  }
1364  APPEND_STRING(ov_model->all_output_names, node_name)
1365  ie_network_name_free(&node_name);
1366  }
1367 #endif
1368 
1369  model->get_input = &get_input_ov;
1370  model->get_output = &get_output_ov;
1371  model->options = options;
1372  model->filter_ctx = filter_ctx;
1373  model->func_type = func_type;
1374 
1375  return model;
1376 
1377 err:
1378  dnn_free_model_ov(&model);
1379  return NULL;
1380 }
1381 
1382 static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
1383 {
1384  OVModel *ov_model = model->model;
1385  OVContext *ctx = &ov_model->ctx;
1386  OVRequestItem *request;
1387  TaskItem *task;
1388  int ret;
1389 
1390  ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
1391  if (ret != 0) {
1392  return ret;
1393  }
1394 
1395 #if HAVE_OPENVINO2
1396  if (!ov_model->compiled_model) {
1397 #else
1398  if (!ov_model->exe_network) {
1399 #endif
1400  ret = init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]);
1401  if (ret != 0) {
1402  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
1403  return ret;
1404  }
1405  }
1406 
1407  task = av_malloc(sizeof(*task));
1408  if (!task) {
1409  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
1410  return AVERROR(ENOMEM);
1411  }
1412 
1413  ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1);
1414  if (ret != 0) {
1415  av_freep(&task);
1416  return ret;
1417  }
1418 
1419  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
1420  av_freep(&task);
1421  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
1422  return AVERROR(ENOMEM);
1423  }
1424 
1425  ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
1426  if (ret != 0) {
1427  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1428  return ret;
1429  }
1430 
1431  if (ctx->options.async) {
1432  while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
1433  request = ff_safe_queue_pop_front(ov_model->request_queue);
1434  if (!request) {
1435  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1436  return AVERROR(EINVAL);
1437  }
1438 
1439  ret = execute_model_ov(request, ov_model->lltask_queue);
1440  if (ret != 0) {
1441  return ret;
1442  }
1443  }
1444 
1445  return 0;
1446  }
1447  else {
1448  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
1449  // The classification filter has not been completely
1450  // tested with sync mode, so it is not supported for now.
1451  avpriv_report_missing_feature(ctx, "classify for sync execution");
1452  return AVERROR(ENOSYS);
1453  }
1454 
1455  if (ctx->options.batch_size > 1) {
1456  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
1457  return AVERROR(ENOSYS);
1458  }
1459 
1460  request = ff_safe_queue_pop_front(ov_model->request_queue);
1461  if (!request) {
1462  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1463  return AVERROR(EINVAL);
1464  }
1465  return execute_model_ov(request, ov_model->lltask_queue);
1466  }
1467 }
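/*
 * dnn_execute_model_ov(): tasks are queued first; in async mode inference is
 * only started once at least batch_size lltasks are pending (leftovers are
 * submitted later by dnn_flush_ov()), while sync mode rejects classification
 * and batch_size > 1 and runs a single request immediately.
 */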
1468 
1469 static DNNAsyncStatusType dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
1470 {
1471  OVModel *ov_model = model->model;
1472  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
1473 }
1474 
1475 static int dnn_flush_ov(const DNNModel *model)
1476 {
1477  OVModel *ov_model = model->model;
1478  OVContext *ctx = &ov_model->ctx;
1479  OVRequestItem *request;
1480 #if HAVE_OPENVINO2
1481  ov_status_e status;
1482 #else
1483  IEStatusCode status;
1484 #endif
1485  int ret;
1486 
1487  if (ff_queue_size(ov_model->lltask_queue) == 0) {
1488  // no pending task needs to be flushed
1489  return 0;
1490  }
1491 
1492  request = ff_safe_queue_pop_front(ov_model->request_queue);
1493  if (!request) {
1494  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1495  return AVERROR(EINVAL);
1496  }
1497 
1498  ret = fill_model_input_ov(ov_model, request);
1499  if (ret != 0) {
1500  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
1501  return ret;
1502  }
1503 #if HAVE_OPENVINO2
1504  status = ov_infer_request_infer(request->infer_request);
1505  if (status != OK) {
1506  av_log(ctx, AV_LOG_ERROR, "Failed to start sync inference for OV2\n");
1507  return ov2_map_error(status, NULL);
1508  }
1509 #else
1510  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1511  if (status != OK) {
1512  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1513  return DNN_GENERIC_ERROR;
1514  }
1515  status = ie_infer_request_infer_async(request->infer_request);
1516  if (status != OK) {
1517  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1518  return DNN_GENERIC_ERROR;
1519  }
1520 #endif
1521 
1522  return 0;
1523 }
1524 
1525 const DNNModule ff_dnn_backend_openvino = {
1526  .load_model = dnn_load_model_ov,
1527  .execute_model = dnn_execute_model_ov,
1528  .get_result = dnn_get_result_ov,
1529  .flush = dnn_flush_ov,
1530  .free_model = dnn_free_model_ov,
1531 };
OVModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_openvino.c:79
ff_dnn_backend_openvino
const DNNModule ff_dnn_backend_openvino
OVModel::input_info
ov_preprocess_input_info_t * input_info
Definition: dnn_backend_openvino.c:66
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:90
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:54
opt.h
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1459
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:824
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:73
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:29
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
get_input_ov
static int get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:959
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
LastLevelTaskItem
Definition: dnn_backend_common.h:50
LastLevelTaskItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:52
test::height
int height
Definition: vc1dsp.c:39
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
AVFrame::width
int width
Definition: frame.h:412
OVOptions::async
uint8_t async
Definition: dnn_backend_openvino.c:45
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
OVOptions::mean
float mean
Definition: dnn_backend_openvino.c:50
AVOption
AVOption.
Definition: opt.h:251
DNNModule::load_model
DNNModel *(* load_model)(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_interface.h:123
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
ov2_map_error
static int ov2_map_error(ov_status_e status, const char **desc)
Definition: dnn_backend_openvino.c:143
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:71
FLAGS
#define FLAGS
Definition: cmdutils.c:515
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:77
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:36
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:58
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:46
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
OVModel::output_info
ov_preprocess_output_info_t * output_info
Definition: dnn_backend_openvino.c:68
OVRequestItem::infer_request
ov_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:87
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:51
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:43
ov2_errors
static const struct @230 ov2_errors[]
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:62
OVModel::output_port
ov_output_const_port_t * output_port
Definition: dnn_backend_openvino.c:67
Queue
Linear double-ended data structure.
Definition: queue.c:33
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFrameSideData::size
size_t size
Definition: frame.h:249
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
float
float
Definition: af_crystalizer.c:121
desc
const char * desc
Definition: dnn_backend_openvino.c:121
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
DNNExecClassificationParams
Definition: dnn_interface.h:84
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
OVOptions::layout
DNNLayout layout
Definition: dnn_backend_openvino.c:48
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:65
execute_model_ov
static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:862
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:162
ctx
AVFormatContext * ctx
Definition: movenc.c:48
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
DL_NCHW
@ DL_NCHW
Definition: dnn_interface.h:61
dnn_free_model_ov
static void dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:490
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:91
OVModel::preprocess
ov_preprocess_prepostprocessor_t * preprocess
Definition: dnn_backend_openvino.c:69
frame
static AVFrame * frame
Definition: demux_decode.c:54
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
if
if(ret)
Definition: filter_design.txt:179
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:181
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:86
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OVModel::all_input_names
const char * all_input_names
Definition: dnn_backend_openvino.c:74
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:413
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:85
av_err
int av_err
Definition: dnn_backend_openvino.c:120
init_model_ov
static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Definition: dnn_backend_openvino.c:554
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:72
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
get_output_ov
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:1144
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1667
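
av_opt_set_from_string() is how string-form backend options are mapped onto an AVOptions-enabled context. The sketch below uses a hypothetical DemoOptions struct and option table (names are illustrative, not this backend's), parsing "key=value" pairs joined with '&'.

#include <limits.h>
#include <stddef.h>
#include "libavutil/opt.h"

typedef struct DemoOptions {
    const AVClass *class;   /* must be the first field for AVOptions */
    char *device;
    int nireq;
} DemoOptions;

static const AVOption demo_opts[] = {
    { "device", "device to run on",   offsetof(DemoOptions, device),
      AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0,       AV_OPT_FLAG_FILTERING_PARAM },
    { "nireq",  "number of requests", offsetof(DemoOptions, nireq),
      AV_OPT_TYPE_INT,    { .i64 = 0 },     0, INT_MAX, AV_OPT_FLAG_FILTERING_PARAM },
    { NULL }
};

static const AVClass demo_class = {
    .class_name = "demo",
    .item_name  = av_default_item_name,
    .option     = demo_opts,
    .version    = LIBAVUTIL_VERSION_INT,
};

static int parse_demo_options(DemoOptions *o, const char *str)
{
    int ret;
    o->class = &demo_class;
    av_opt_set_defaults(o);
    /* e.g. str = "device=CPU&nireq=2" */
    ret = av_opt_set_from_string(o, str, NULL, "=", "&");
    if (ret < 0)
        av_opt_free(o);
    return ret;
}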
av_opt_free
void av_opt_free(void *obj)
Free all allocated objects in obj.
Definition: opt.c:1719
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:84
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:59
OVRequestItem
Definition: dnn_backend_openvino.c:83
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:209
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:217
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:181
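
get_datatype_size() maps a DNNDataType to its size in bytes. A plausible sketch follows; the actual body in dnn_backend_openvino.c may differ slightly, and the _sketch suffix marks it as illustrative.

#include <stdint.h>
#include "../dnn_interface.h"
#include "libavutil/avassert.h"

static int get_datatype_size_sketch(DNNDataType dt)
{
    switch (dt) {
    case DNN_FLOAT:
        return sizeof(float);    /* 4 bytes */
    case DNN_UINT8:
        return sizeof(uint8_t);  /* 1 byte */
    default:
        av_assert0(!"not supported yet.");
        return 1;
    }
}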
options
const OptionDef options[]
test::width
int width
Definition: vc1dsp.c:38
f
f
Definition: af_crystalizer.c:121
OVModel::compiled_model
ov_compiled_model_t * compiled_model
Definition: dnn_backend_openvino.c:64
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
DNNLayout
DNNLayout
Definition: dnn_interface.h:59
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:60
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
OVModel::all_output_names
const char * all_output_names
Definition: dnn_backend_openvino.c:75
header
static const uint8_t header[24]
Definition: sdr2.c:67
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:56
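
ff_dnn_fill_task() copies the per-call execution parameters into a TaskItem before it is queued. The sketch below shows how a backend's execute path typically uses it; the surrounding function and its error handling are assumptions, not code from this file.

#include "dnn_backend_common.h"
#include "queue.h"
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int queue_task_sketch(void *backend_model, Queue *task_queue,
                             DNNExecBaseParams *exec_params, int async)
{
    int ret;
    TaskItem *task = av_malloc(sizeof(*task));
    if (!task)
        return AVERROR(ENOMEM);

    /* copies model pointer, in/out frames, input/output names, async and do_ioproc */
    ret = ff_dnn_fill_task(task, exec_params, backend_model, async, 1);
    if (ret < 0) {
        av_freep(&task);
        return ret;
    }

    if (ff_queue_push_back(task_queue, task) < 0) {
        av_freep(&task);
        return AVERROR(ENOMEM);
    }
    return 0;
}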
input
test the status of outputs and forward it to the corresponding input(s); return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input
Definition: filter_design.txt:172
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:1041
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:228
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of the queue in the SafeQueue after locking the mutex.
Definition: safe_queue.c:95
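
The SafeQueue helpers (ff_safe_queue_create/push_back/size/destroy) form the thread-safe request pool used by the async path. A minimal, hedged usage sketch; ff_safe_queue_pop_front() is assumed to come from safe_queue.h, it is not listed in this index.

#include "safe_queue.h"
#include "libavutil/error.h"

static int request_pool_sketch(void *request)
{
    SafeQueue *sq = ff_safe_queue_create();
    if (!sq)
        return AVERROR(ENOMEM);

    /* the push happens under the queue's internal mutex */
    if (ff_safe_queue_push_back(sq, request) < 0) {
        ff_safe_queue_destroy(sq);
        return AVERROR(ENOMEM);
    }

    /* drain whatever is pending, e.g. when freeing the model */
    while (ff_safe_queue_size(sq) > 0)
        (void)ff_safe_queue_pop_front(sq);

    ff_safe_queue_destroy(sq);
    return 0;
}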
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:47
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:101
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:345
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
OVModel::ov_model
ov_model_t * ov_model
Definition: dnn_backend_openvino.c:63
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
OVModel::core
ov_core_t * core
Definition: dnn_backend_openvino.c:62
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:254
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:77
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
OVContext
Definition: dnn_backend_openvino.c:53
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:117
ret
ret
Definition: filter_design.txt:187
OVOptions::scale
float scale
Definition: dnn_backend_openvino.c:49
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
OFFSET
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it be. Each option has a default, a minimum, a maximum and flags; name is the option name, keep it simple and lowercase; description is short, without a period, and describes what it does, for example "set the foo of the bar"; offset is the offset of the field in your context, see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:78
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:412
extract_lltask_from_task
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:1078
status
ov_status_e status
Definition: dnn_backend_openvino.c:119
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height, defining the bounding box.
Definition: detection_bbox.h:31
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract the input and output frames from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:142
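
ff_dnn_get_result_common() is what callers poll to collect frames once asynchronous inference completes. A hedged sketch of the calling pattern; the DAST_* enum values are assumed from dnn_interface.h and the function name is illustrative.

#include "dnn_backend_common.h"
#include "queue.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"

static int drain_results_sketch(Queue *task_queue)
{
    AVFrame *in = NULL, *out = NULL;
    DNNAsyncStatusType st;

    do {
        st = ff_dnn_get_result_common(task_queue, &in, &out);
        if (st == DAST_SUCCESS) {
            /* a finished (in, out) frame pair is available here */
        }
    } while (st == DAST_SUCCESS);

    return (st == DAST_NOT_READY || st == DAST_EMPTY_QUEUE) ? 0 : AVERROR(EINVAL);
}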
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:78
DL_NONE
@ DL_NONE
Definition: dnn_interface.h:60
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
DNNModel
Definition: dnn_interface.h:93
precision_to_datatype
static DNNDataType precision_to_datatype(ov_element_type_e precision)
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:160
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:97
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:44
ff_frame_to_dnn_classify
int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:339
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
OVOptions
Definition: dnn_backend_openvino.c:42
DNNExecBaseParams
Definition: dnn_interface.h:76
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
OVModel::input_port
ov_output_const_port_t * input_port
Definition: dnn_backend_openvino.c:65
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
OVRequestItem::callback
ov_callback_t callback
Definition: dnn_backend_openvino.c:88
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:234
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
fill_model_input_ov
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:195
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:190
DNNModule
Definition: dnn_interface.h:121
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:55
DNNModel::model
void * model
Definition: dnn_interface.h:95
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:41