dnn_backend_openvino.c
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_io_proc.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/detection_bbox.h"
34 #include "safe_queue.h"
35 #if HAVE_OPENVINO2
36 #include <openvino/c/openvino.h>
37 #else
38 #include <c_api/ie_c_api.h>
39 #endif
40 #include "dnn_backend_common.h"
41 
42 typedef struct OVModel{
43  DNNModel model;
44  DnnContext *ctx;
45 #if HAVE_OPENVINO2
46  ov_core_t *core;
47  ov_model_t *ov_model;
48  ov_compiled_model_t *compiled_model;
49  ov_output_const_port_t* input_port;
50  ov_preprocess_input_info_t* input_info;
51  ov_output_const_port_t** output_ports;
52  ov_preprocess_output_info_t* output_info;
53  ov_preprocess_prepostprocessor_t* preprocess;
54 #else
55  ie_core_t *core;
56  ie_network_t *network;
57  ie_executable_network_t *exe_network;
58  const char *all_input_names;
59  const char *all_output_names;
60 #endif
61  SafeQueue *request_queue; // holds OVRequestItem
62  Queue *task_queue; // holds TaskItem
63  Queue *lltask_queue; // holds LastLevelTaskItem
64  int nb_outputs;
65 } OVModel;
66 
67 // one request for one call to openvino
68 typedef struct OVRequestItem {
69  LastLevelTaskItem **lltasks;
70  uint32_t lltask_count;
71 #if HAVE_OPENVINO2
72  ov_infer_request_t *infer_request;
73  ov_callback_t callback;
74 #else
75  ie_complete_call_back_t callback;
76  ie_infer_request_t *infer_request;
77 #endif
78 } OVRequestItem;
79 
80 #define APPEND_STRING(generated_string, iterate_string) \
81  generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
82  av_asprintf("%s", iterate_string);
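// Note: APPEND_STRING builds a space-separated list with av_asprintf(); the
// macro itself does not free the previously generated string, so it is only
// used below for short, one-shot lists of node/device names in error messages.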
83 
84 #define OFFSET(x) offsetof(OVOptions, x)
85 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
86 static const AVOption dnn_openvino_options[] = {
87  { "batch_size", "batch size per request", OFFSET(batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
88  { "input_resizable", "can input be resizable or not", OFFSET(input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
89  { "layout", "input layout of model", OFFSET(layout), AV_OPT_TYPE_INT, { .i64 = DL_NONE}, DL_NONE, DL_NHWC, FLAGS, .unit = "layout" },
90  { "none", "none", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NONE }, 0, 0, FLAGS, .unit = "layout"},
91  { "nchw", "nchw", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NCHW }, 0, 0, FLAGS, .unit = "layout"},
92  { "nhwc", "nhwc", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NHWC }, 0, 0, FLAGS, .unit = "layout"},
93  { "scale", "Add scale preprocess operation. Divide each element of input by specified value.", OFFSET(scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
94  { "mean", "Add mean preprocess operation. Subtract specified value from each element of input.", OFFSET(mean), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
95  { NULL }
96 };
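/*
 * Illustration only (the option plumbing lives in the generic DNN layer and the
 * filters, not in this file): these AVOptions end up in DnnContext.ov_option and
 * are typically supplied on the filter that selects this backend, e.g. a
 * dnn_processing filtergraph along the lines of
 *
 *     dnn_processing=dnn_backend=openvino:model=model.xml:input=x:output=y
 *
 * with backend options such as batch_size, layout, scale and mean passed
 * through the same AVOption mechanism.
 */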
97 
98 #if HAVE_OPENVINO2
99 static const struct {
100  ov_status_e status;
101  int av_err;
102  const char *desc;
103 } ov2_errors[] = {
104  { OK, 0, "success" },
105  { GENERAL_ERROR, AVERROR_EXTERNAL, "general error" },
106  { NOT_IMPLEMENTED, AVERROR(ENOSYS), "not implemented" },
107  { NETWORK_NOT_LOADED, AVERROR_EXTERNAL, "network not loaded" },
108  { PARAMETER_MISMATCH, AVERROR(EINVAL), "parameter mismatch" },
109  { NOT_FOUND, AVERROR_EXTERNAL, "not found" },
110  { OUT_OF_BOUNDS, AVERROR(EOVERFLOW), "out of bounds" },
111  { UNEXPECTED, AVERROR_EXTERNAL, "unexpected" },
112  { REQUEST_BUSY, AVERROR(EBUSY), "request busy" },
113  { RESULT_NOT_READY, AVERROR(EBUSY), "result not ready" },
114  { NOT_ALLOCATED, AVERROR(ENODATA), "not allocated" },
115  { INFER_NOT_STARTED, AVERROR_EXTERNAL, "infer not started" },
116  { NETWORK_NOT_READ, AVERROR_EXTERNAL, "network not read" },
117  { INFER_CANCELLED, AVERROR(ECANCELED), "infer cancelled" },
118  { INVALID_C_PARAM, AVERROR(EINVAL), "invalid C parameter" },
119  { UNKNOWN_C_ERROR, AVERROR_UNKNOWN, "unknown C error" },
120  { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS), "not implement C method" },
121  { UNKNOW_EXCEPTION, AVERROR_UNKNOWN, "unknown exception" },
122 };
123 
124 static int ov2_map_error(ov_status_e status, const char **desc)
125 {
126  int i;
127  for (i = 0; i < FF_ARRAY_ELEMS(ov2_errors); i++) {
128  if (ov2_errors[i].status == status) {
129  if (desc)
130  *desc = ov2_errors[i].desc;
131  return ov2_errors[i].av_err;
132  }
133  }
134  if (desc)
135  *desc = "unknown error";
136  return AVERROR_UNKNOWN;
137 }
138 #endif
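/*
 * Minimal usage sketch for ov2_map_error() above (not verbatim from this file,
 * which usually passes NULL for the description): map an OpenVINO C API status
 * to an AVERROR code plus a printable description for logging.
 *
 * @code
 * const char *desc;
 * ov_status_e status = ov_core_create(&core);
 * if (status != OK) {
 *     int err = ov2_map_error(status, &desc);
 *     av_log(ctx, AV_LOG_ERROR, "OpenVINO call failed: %s\n", desc);
 *     return err;
 * }
 * @endcode
 */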
139 
140 #if HAVE_OPENVINO2
141 static DNNDataType precision_to_datatype(ov_element_type_e precision)
142 #else
143 static DNNDataType precision_to_datatype(precision_e precision)
144 #endif
145 {
146  switch (precision)
147  {
148 #if HAVE_OPENVINO2
149  case F32:
150 #else
151  case FP32:
152 #endif
153  return DNN_FLOAT;
154  case U8:
155  return DNN_UINT8;
156  default:
157  av_assert0(!"not supported yet.");
158  return DNN_FLOAT;
159  }
160 }
161 
162 static int get_datatype_size(DNNDataType dt)
163 {
164  switch (dt)
165  {
166  case DNN_FLOAT:
167  return sizeof(float);
168  case DNN_UINT8:
169  return sizeof(uint8_t);
170  default:
171  av_assert0(!"not supported yet.");
172  return 1;
173  }
174 }
175 
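/**
 * Bind the next batch of queued tasks to the request's input tensor/blob.
 *
 * The per-element stride used at the bottom of the loop is
 * dims[1] * dims[2] * dims[3] * get_datatype_size(dt); for example (numbers for
 * illustration only), a DNN_FLOAT NHWC input of 1x224x224x3 advances input.data
 * by 224 * 224 * 3 * 4 = 602112 bytes for each task taken from lltask_queue.
 */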
176 static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
177 {
178  DNNData input;
179  LastLevelTaskItem *lltask;
180  TaskItem *task;
181  DnnContext *ctx = ov_model->ctx;
182 #if HAVE_OPENVINO2
183  int64_t* dims;
184  ov_status_e status;
185  ov_tensor_t* tensor = NULL;
186  ov_shape_t input_shape = {0};
187  ov_element_type_e precision;
188  char *port_name;
189 #else
190  dimensions_t dims;
191  precision_e precision;
192  ie_blob_buffer_t blob_buffer;
193  IEStatusCode status;
194  ie_blob_t *input_blob = NULL;
195 #endif
196 
197  memset(&input, 0, sizeof(input));
198  lltask = ff_queue_peek_front(ov_model->lltask_queue);
199  av_assert0(lltask);
200  task = lltask->task;
201 
202 #if HAVE_OPENVINO2
203  if (ov_model->input_port) {
204  ov_output_const_port_free(ov_model->input_port);
205  ov_model->input_port = NULL;
206  }
207  if (task->input_name)
208  status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
209  else
210  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
211  if (status != OK) {
212  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
213  return ov2_map_error(status, NULL);
214  }
215  status = ov_port_get_any_name(ov_model->input_port, &port_name);
216  if (status != OK) {
217  av_log(ctx, AV_LOG_ERROR, "Failed to get input port name.\n");
218  return ov2_map_error(status, NULL);
219  }
220  av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model input: %s\n", port_name);
221  ov_free(port_name);
222  port_name = NULL;
223 
224  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
225  if (status != OK) {
226  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
227  return ov2_map_error(status, NULL);
228  }
229  dims = input_shape.dims;
230  status = ov_port_get_element_type(ov_model->input_port, &precision);
231  if (status != OK) {
232  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
233  ov_shape_free(&input_shape);
234  return ov2_map_error(status, NULL);
235  }
236  for (int i = 0; i < input_shape.rank; i++)
237  input.dims[i] = dims[i];
238  input.layout = DL_NHWC;
239  input.dt = precision_to_datatype(precision);
240 #else
241  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
242  if (status != OK) {
243  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
244  return DNN_GENERIC_ERROR;
245  }
246 
247  status |= ie_blob_get_dims(input_blob, &dims);
248  status |= ie_blob_get_precision(input_blob, &precision);
249  if (status != OK) {
250  ie_blob_free(&input_blob);
251  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
252  return DNN_GENERIC_ERROR;
253  }
254 
255  status = ie_blob_get_buffer(input_blob, &blob_buffer);
256  if (status != OK) {
257  ie_blob_free(&input_blob);
258  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
259  return DNN_GENERIC_ERROR;
260  }
261  for (int i = 0; i < 4; i++)
262  input.dims[i] = dims.dims[i];
263  input.layout = DL_NCHW;
264  input.data = blob_buffer.buffer;
265  input.dt = precision_to_datatype(precision);
266 #endif
267  // all models in openvino open model zoo use BGR as input,
268  // change to be an option when necessary.
269  input.order = DCO_BGR;
270  // We use preprocess_steps to scale input data, so disable scale and mean here.
271  input.scale = 1;
272  input.mean = 0;
273 
274  for (int i = 0; i < ctx->ov_option.batch_size; ++i) {
275  lltask = ff_queue_pop_front(ov_model->lltask_queue);
276  if (!lltask) {
277  break;
278  }
279  request->lltasks[i] = lltask;
280  request->lltask_count = i + 1;
281  task = lltask->task;
282 #if HAVE_OPENVINO2
283  if (tensor)
284  ov_tensor_free(tensor);
285  status = ov_tensor_create(precision, input_shape, &tensor);
286  ov_shape_free(&input_shape);
287  if (status != OK) {
288  av_log(ctx, AV_LOG_ERROR, "Failed to create tensor from host ptr.\n");
289  return ov2_map_error(status, NULL);
290  }
291  status = ov_tensor_data(tensor, &input.data);
292  if (status != OK) {
293  av_log(ctx, AV_LOG_ERROR, "Failed to get input data.\n");
294  return ov2_map_error(status, NULL);
295  }
296  status = ov_infer_request_set_input_tensor(request->infer_request, tensor);
297  if (status != OK) {
298  av_log(ctx, AV_LOG_ERROR, "Failed to Set an input tensor for the model.\n");
299  return ov2_map_error(status, NULL);
300  }
301 #endif
302  switch (ov_model->model.func_type) {
303  case DFT_PROCESS_FRAME:
304  if (task->do_ioproc) {
305  if (ov_model->model.frame_pre_proc != NULL) {
306  ov_model->model.frame_pre_proc(task->in_frame, &input, ov_model->model.filter_ctx);
307  } else {
308  ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
309  }
310  }
311  break;
312  case DFT_ANALYTICS_DETECT:
313  ff_frame_to_dnn_detect(task->in_frame, &input, ctx);
314  break;
315  case DFT_ANALYTICS_CLASSIFY:
316  ff_frame_to_dnn_classify(task->in_frame, &input, ctx, request->lltasks[i]->bbox_index);
317  break;
318  default:
319  av_assert0(!"should not reach here");
320  break;
321  }
322  input.data = (uint8_t *)input.data +
323  input.dims[1] * input.dims[2] * input.dims[3] * get_datatype_size(input.dt);
324  }
325 #if HAVE_OPENVINO2
326  ov_tensor_free(tensor);
327 #else
328  ie_blob_free(&input_blob);
329 #endif
330 
331  return 0;
332 }
333 
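/**
 * Completion callback shared by the async and sync paths: it fetches the output
 * tensors/blobs, runs the per-function-type post-processing for every
 * LastLevelTaskItem batched into the request, advances the output data pointer
 * between batch elements, and finally returns the request to request_queue for
 * reuse.
 */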
334 static void infer_completion_callback(void *args)
335 {
336  OVRequestItem *request = args;
337  LastLevelTaskItem *lltask = request->lltasks[0];
338  TaskItem *task = lltask->task;
339  OVModel *ov_model = task->model;
340  SafeQueue *requestq = ov_model->request_queue;
341  DNNData *outputs;
342  DnnContext *ctx = ov_model->ctx;
343 #if HAVE_OPENVINO2
344  size_t* dims;
345  ov_status_e status;
346  ov_tensor_t *output_tensor;
347  ov_shape_t output_shape = {0};
348  ov_element_type_e precision;
349 
350  outputs = av_calloc(ov_model->nb_outputs, sizeof(*outputs));
351  if (!outputs) {
352  av_log(ctx, AV_LOG_ERROR, "Failed to alloc outputs.");
353  return;
354  }
355 
356  for (int i = 0; i < ov_model->nb_outputs; i++) {
357  status = ov_infer_request_get_tensor_by_const_port(request->infer_request,
358  ov_model->output_ports[i],
359  &output_tensor);
360  if (status != OK) {
361  av_log(ctx, AV_LOG_ERROR,
362  "Failed to get output tensor.");
363  goto end;
364  }
365 
366  status = ov_tensor_data(output_tensor, &outputs[i].data);
367  if (status != OK) {
368  av_log(ctx, AV_LOG_ERROR,
369  "Failed to get output data.");
370  goto end;
371  }
372 
373  status = ov_tensor_get_shape(output_tensor, &output_shape);
374  if (status != OK) {
375  av_log(ctx, AV_LOG_ERROR, "Failed to get output port shape.\n");
376  goto end;
377  }
378  dims = output_shape.dims;
379 
380  status = ov_port_get_element_type(ov_model->output_ports[i], &precision);
381  if (status != OK) {
382  av_log(ctx, AV_LOG_ERROR, "Failed to get output port data type.\n");
383  goto end;
384  }
385  outputs[i].dt = precision_to_datatype(precision);
386  outputs[i].layout = DL_NCHW;
387  outputs[i].dims[0] = 1;
388  outputs[i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
389  outputs[i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
390  outputs[i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
391  av_assert0(request->lltask_count <= dims[0]);
392  outputs[i].layout = ctx->ov_option.layout;
393  outputs[i].scale = ctx->ov_option.scale;
394  outputs[i].mean = ctx->ov_option.mean;
395  ov_shape_free(&output_shape);
396  ov_tensor_free(output_tensor);
397  output_tensor = NULL;
398  }
399 #else
400  IEStatusCode status;
401  dimensions_t dims;
402  ie_blob_t *output_blob = NULL;
403  ie_blob_buffer_t blob_buffer;
404  precision_e precision;
405  DNNData output;
406  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
407  if (status != OK) {
408  av_log(ctx, AV_LOG_ERROR,
409  "output \"%s\" may not be correct, all output(s) are: \"%s\"\n",
410  task->output_names[0], ov_model->all_output_names);
411  return;
412  }
413 
414  status = ie_blob_get_buffer(output_blob, &blob_buffer);
415  if (status != OK) {
416  ie_blob_free(&output_blob);
417  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
418  return;
419  }
420 
421  status |= ie_blob_get_dims(output_blob, &dims);
422  status |= ie_blob_get_precision(output_blob, &precision);
423  if (status != OK) {
424  ie_blob_free(&output_blob);
425  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
426  return;
427  }
428  output.data = blob_buffer.buffer;
429  output.layout = DL_NCHW;
430  for (int i = 0; i < 4; i++)
431  output.dims[i] = dims.dims[i];
432  av_assert0(request->lltask_count <= dims.dims[0]);
433  output.dt = precision_to_datatype(precision);
434  output.layout = ctx->ov_option.layout;
435  output.scale = ctx->ov_option.scale;
436  output.mean = ctx->ov_option.mean;
437  outputs = &output;
438 #endif
439 
440  av_assert0(request->lltask_count >= 1);
441  for (int i = 0; i < request->lltask_count; ++i) {
442  task = request->lltasks[i]->task;
443 
444  switch (ov_model->model.func_type) {
445  case DFT_PROCESS_FRAME:
446  if (task->do_ioproc) {
447  if (ov_model->model.frame_post_proc != NULL) {
448  ov_model->model.frame_post_proc(task->out_frame, outputs, ov_model->model.filter_ctx);
449  } else {
450  ff_proc_from_dnn_to_frame(task->out_frame, outputs, ctx);
451  }
452  } else {
453  task->out_frame->width =
454  outputs[0].dims[dnn_get_width_idx_by_layout(outputs[0].layout)];
455  task->out_frame->height =
456  outputs[0].dims[dnn_get_height_idx_by_layout(outputs[0].layout)];
457  }
458  break;
459  case DFT_ANALYTICS_DETECT:
460  if (!ov_model->model.detect_post_proc) {
461  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
462  goto end;
463  }
464  ov_model->model.detect_post_proc(task->in_frame, outputs,
465  ov_model->nb_outputs,
466  ov_model->model.filter_ctx);
467  break;
468  case DFT_ANALYTICS_CLASSIFY:
469  if (!ov_model->model.classify_post_proc) {
470  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
471  goto end;
472  }
473  for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
474  ov_model->model.classify_post_proc(task->in_frame, outputs,
475  request->lltasks[i]->bbox_index,
476  ov_model->model.filter_ctx);
477  break;
478  default:
479  av_assert0(!"should not reach here");
480  break;
481  }
482 
483  task->inference_done++;
484  av_freep(&request->lltasks[i]);
485  for (int i = 0; i < ov_model->nb_outputs; i++)
486  outputs[i].data = (uint8_t *)outputs[i].data +
487  outputs[i].dims[1] * outputs[i].dims[2] * outputs[i].dims[3] *
488  get_datatype_size(outputs[i].dt);
489  }
490 end:
491 #if HAVE_OPENVINO2
492  av_freep(&outputs);
493  ov_shape_free(&output_shape);
494  if (output_tensor)
495  ov_tensor_free(output_tensor);
496 #else
497  ie_blob_free(&output_blob);
498 #endif
499  request->lltask_count = 0;
500  if (ff_safe_queue_push_back(requestq, request) < 0) {
501 #if HAVE_OPENVINO2
502  ov_infer_request_free(request->infer_request);
503 #else
504  ie_infer_request_free(&request->infer_request);
505 #endif
506  av_freep(&request);
507  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
508  return;
509  }
510 }
511 
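/**
 * Drain and destroy the request/task/lltask queues, then release the OpenVINO
 * objects (ports, preprocessor, compiled model, model and core with OpenVINO 2;
 * executable network, network and core with the legacy C API) and the OVModel
 * itself.
 */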
512 static void dnn_free_model_ov(DNNModel **model)
513 {
514  OVModel *ov_model;
515 
516  if (!model || !*model)
517  return;
518 
519  ov_model = (OVModel *)(*model);
520  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
521  OVRequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
522  if (item && item->infer_request) {
523 #if HAVE_OPENVINO2
524  ov_infer_request_free(item->infer_request);
525 #else
526  ie_infer_request_free(&item->infer_request);
527 #endif
528  }
529  av_freep(&item->lltasks);
530  av_freep(&item);
531  }
532  ff_safe_queue_destroy(ov_model->request_queue);
533 
534  while (ff_queue_size(ov_model->lltask_queue) != 0) {
535  LastLevelTaskItem *item = ff_queue_pop_front(ov_model->lltask_queue);
536  av_freep(&item);
537  }
538  ff_queue_destroy(ov_model->lltask_queue);
539 
540  while (ff_queue_size(ov_model->task_queue) != 0) {
541  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
542  av_frame_free(&item->in_frame);
543  av_frame_free(&item->out_frame);
544  av_freep(&item);
545  }
546  ff_queue_destroy(ov_model->task_queue);
547 #if HAVE_OPENVINO2
548  if (ov_model->input_port)
549  ov_output_const_port_free(ov_model->input_port);
550  for (int i = 0; i < ov_model->nb_outputs; i++)
551  if (ov_model->output_ports[i])
552  ov_output_const_port_free(ov_model->output_ports[i]);
553  av_freep(&ov_model->output_ports);
554  if (ov_model->preprocess)
555  ov_preprocess_prepostprocessor_free(ov_model->preprocess);
556  if (ov_model->compiled_model)
557  ov_compiled_model_free(ov_model->compiled_model);
558  if (ov_model->ov_model)
559  ov_model_free(ov_model->ov_model);
560  if (ov_model->core)
561  ov_core_free(ov_model->core);
562 #else
563  if (ov_model->exe_network)
564  ie_exec_network_free(&ov_model->exe_network);
565  if (ov_model->network)
566  ie_network_free(&ov_model->network);
567  if (ov_model->core)
568  ie_core_free(&ov_model->core);
569  av_free(ov_model->all_output_names);
570  av_free(ov_model->all_input_names);
571 #endif
572  av_freep(&ov_model);
573  *model = NULL;
574 }
575 
576 
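/**
 * Build the preprocessing pipeline, compile the model for the target device and
 * create the pool of inference requests.
 *
 * A condensed sketch of the OpenVINO 2 flow used below, with error handling and
 * the optional scale/mean/layout handling omitted (it assumes ov_model->ov_model
 * and ov_model->core were already set up by dnn_load_model_ov()):
 *
 * @code
 * ov_preprocess_prepostprocessor_create(ov_model->ov_model, &ov_model->preprocess);
 * ov_preprocess_prepostprocessor_get_input_info(ov_model->preprocess, &ov_model->input_info);
 * ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
 * ov_layout_create("NHWC", &NHWC_layout);
 * ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
 * ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
 * ov_preprocess_prepostprocessor_build(ov_model->preprocess, &ov_model->ov_model);
 * ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
 * ov_compiled_model_create_infer_request(ov_model->compiled_model, &item->infer_request);
 * @endcode
 */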
577 static int init_model_ov(OVModel *ov_model, const char *input_name, const char **output_names, int nb_outputs)
578 {
579  int ret = 0;
580  DnnContext *ctx = ov_model->ctx;
581 #if HAVE_OPENVINO2
582  ov_status_e status;
583  ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
584  ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
585  ov_preprocess_input_model_info_t* input_model_info = NULL;
586  ov_model_t *tmp_ov_model;
587  ov_layout_t* NHWC_layout = NULL;
588  ov_layout_t* NCHW_layout = NULL;
589  const char* NHWC_desc = "NHWC";
590  const char* NCHW_desc = "NCHW";
591  const char* device = ctx->device ? ctx->device : "CPU";
592 #else
593  IEStatusCode status;
594  ie_available_devices_t a_dev;
595  ie_config_t config = {NULL, NULL, NULL};
596  char *all_dev_names = NULL;
597 #endif
598  // We scale pixels by default when doing frame processing.
599  if (fabsf(ctx->ov_option.scale) < 1e-6f)
600  ctx->ov_option.scale = ov_model->model.func_type == DFT_PROCESS_FRAME ? 255 : 1;
601  // batch size
602  if (ctx->ov_option.batch_size <= 0) {
603  ctx->ov_option.batch_size = 1;
604  }
605 #if HAVE_OPENVINO2
606  if (ctx->ov_option.batch_size > 1) {
607  avpriv_report_missing_feature(ctx, "Do not support batch_size > 1 for now,"
608  "change batch_size to 1.\n");
609  ctx->ov_option.batch_size = 1;
610  }
611 
612  status = ov_preprocess_prepostprocessor_create(ov_model->ov_model, &ov_model->preprocess);
613  if (status != OK) {
614  av_log(ctx, AV_LOG_ERROR, "Failed to create preprocess for ov_model.\n");
615  ret = ov2_map_error(status, NULL);
616  goto err;
617  }
618 
619  if (input_name)
620  status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
621  else
622  status = ov_preprocess_prepostprocessor_get_input_info(ov_model->preprocess, &ov_model->input_info);
623  if (status != OK) {
624  av_log(ctx, AV_LOG_ERROR, "Failed to get input info from preprocess.\n");
625  ret = ov2_map_error(status, NULL);
626  goto err;
627  }
628 
629  status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
630  if (status != OK) {
631  av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input.\n");
632  ret = ov2_map_error(status, NULL);
633  goto err;
634  }
635 
636  //set input layout
637  status = ov_layout_create(NHWC_desc, &NHWC_layout);
638  status |= ov_layout_create(NCHW_desc, &NCHW_layout);
639  if (status != OK) {
640  av_log(ctx, AV_LOG_ERROR, "Failed to create layout for input.\n");
641  ret = ov2_map_error(status, NULL);
642  goto err;
643  }
644 
645  status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
646  if (status != OK) {
647  av_log(ctx, AV_LOG_ERROR, "Failed to set input tensor layout\n");
648  ret = ov2_map_error(status, NULL);
649  goto err;
650  }
651 
652  status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);
653  if (status != OK) {
654  av_log(ctx, AV_LOG_ERROR, "Failed to get input model info\n");
655  ret = ov2_map_error(status, NULL);
656  goto err;
657  }
658  if (ctx->ov_option.layout == DL_NCHW)
659  status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
660  else if (ctx->ov_option.layout == DL_NHWC)
661  status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
662  if (status != OK) {
663  av_log(ctx, AV_LOG_ERROR, "Failed to set input model layout\n");
664  ret = ov2_map_error(status, NULL);
665  goto err;
666  }
667 
668  status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
669  if (status != OK) {
670  av_log(ctx, AV_LOG_ERROR, "Failed to set input element type\n");
671  ret = ov2_map_error(status, NULL);
672  goto err;
673  }
674 
675  if (!nb_outputs) {
676  size_t output_size;
677  status = ov_model_outputs_size(ov_model->ov_model, &output_size);
678  if (status != OK) {
679  av_log(ctx, AV_LOG_ERROR, "Failed to get output size.\n");
680  ret = ov2_map_error(status, NULL);
681  goto err;
682  }
683  nb_outputs = output_size;
684  }
685  ov_model->nb_outputs = nb_outputs;
686  for (int i = 0; i < nb_outputs; i++) {
687  if (output_names)
688  status = ov_preprocess_prepostprocessor_get_output_info_by_name(
689  ov_model->preprocess, output_names[i], &ov_model->output_info);
690  else
691  status = ov_preprocess_prepostprocessor_get_output_info_by_index(
692  ov_model->preprocess, i, &ov_model->output_info);
693  if (status != OK) {
694  av_log(ctx, AV_LOG_ERROR, "Failed to get output info from preprocess.\n");
695  ret = ov2_map_error(status, NULL);
696  goto err;
697  }
698  status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
699  if (status != OK) {
700  av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input/output.\n");
701  ret = ov2_map_error(status, NULL);
702  goto err;
703  }
704  if (ov_model->model.func_type != DFT_PROCESS_FRAME)
705  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
706  else if (fabsf(ctx->ov_option.scale - 1) > 1e-6f || fabsf(ctx->ov_option.mean) > 1e-6f)
707  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
708  else
709  status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
710  if (status != OK) {
711  av_log(ctx, AV_LOG_ERROR, "Failed to set output element type\n");
712  ret = ov2_map_error(status, NULL);
713  goto err;
714  }
715  ov_preprocess_output_tensor_info_free(output_tensor_info);
716  output_tensor_info = NULL;
717  ov_preprocess_output_info_free(ov_model->output_info);
718  ov_model->output_info = NULL;
719  }
720  // set preprocess steps.
721  if (fabsf(ctx->ov_option.scale - 1) > 1e-6f || fabsf(ctx->ov_option.mean) > 1e-6f) {
722  ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
723  status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);
724  if (status != OK) {
725  av_log(ctx, AV_LOG_ERROR, "Failed to get preprocess steps\n");
726  ret = ov2_map_error(status, NULL);
727  goto err;
728  }
729  status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
730  status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->ov_option.mean);
731  status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->ov_option.scale);
732  if (status != OK) {
733  av_log(ctx, AV_LOG_ERROR, "Failed to set preprocess steps\n");
734  ov_preprocess_preprocess_steps_free(input_process_steps);
735  input_process_steps = NULL;
736  ret = ov2_map_error(status, NULL);
737  goto err;
738  }
739  ov_preprocess_preprocess_steps_free(input_process_steps);
740  input_process_steps = NULL;
741  }
742  ov_preprocess_input_tensor_info_free(input_tensor_info);
743  input_tensor_info = NULL;
744  ov_preprocess_input_info_free(ov_model->input_info);
745  ov_model->input_info = NULL;
746 
747  //update model
748  if(ov_model->ov_model)
749  tmp_ov_model = ov_model->ov_model;
750  status = ov_preprocess_prepostprocessor_build(ov_model->preprocess, &ov_model->ov_model);
751  if (status != OK) {
752  av_log(ctx, AV_LOG_ERROR, "Failed to update OV model\n");
753  ov_model_free(tmp_ov_model);
754  tmp_ov_model = NULL;
755  ret = ov2_map_error(status, NULL);
756  goto err;
757  }
758  ov_model_free(tmp_ov_model);
759 
760  //update output_port
761  if (!ov_model->output_ports) {
762  ov_model->output_ports = av_calloc(nb_outputs, sizeof(*ov_model->output_ports));
763  if (!ov_model->output_ports) {
764  ret = AVERROR(ENOMEM);
765  goto err;
766  }
767  } else
768  for (int i = 0; i < nb_outputs; i++) {
769  ov_output_const_port_free(ov_model->output_ports[i]);
770  ov_model->output_ports[i] = NULL;
771  }
772 
773  for (int i = 0; i < nb_outputs; i++) {
774  char *port_name;
775  if (output_names)
776  status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
777  &ov_model->output_ports[i]);
778  else
779  status = ov_model_const_output_by_index(ov_model->ov_model, i,
780  &ov_model->output_ports[i]);
781  if (status != OK) {
782  av_log(ctx, AV_LOG_ERROR, "Failed to get output port %s.\n", output_names[i]);
783  goto err;
784  }
785  status = ov_port_get_any_name(ov_model->output_ports[i], &port_name);
786  if (status != OK) {
787  av_log(ctx, AV_LOG_ERROR, "Failed to get output port name.\n");
788  goto err;
789  }
790  av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model outputs: %s\n", port_name);
791  ov_free(port_name);
792  port_name = NULL;
793  }
794  //compile network
795  status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
796  if (status != OK) {
797  ret = ov2_map_error(status, NULL);
798  goto err;
799  }
800  ov_preprocess_input_model_info_free(input_model_info);
801  input_model_info = NULL;
802  ov_layout_free(NCHW_layout);
803  ov_layout_free(NHWC_layout);
804 #else
805  if (ctx->ov_option.batch_size > 1) {
806  input_shapes_t input_shapes;
807  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
808  if (status != OK) {
809  ret = DNN_GENERIC_ERROR;
810  goto err;
811  }
812  for (int i = 0; i < input_shapes.shape_num; i++)
813  input_shapes.shapes[i].shape.dims[0] = ctx->ov_option.batch_size;
814  status = ie_network_reshape(ov_model->network, input_shapes);
815  ie_network_input_shapes_free(&input_shapes);
816  if (status != OK) {
817  ret = DNN_GENERIC_ERROR;
818  goto err;
819  }
820  }
821 
822  // The order of dims in OpenVINO is fixed and it is always NCHW for 4-D data,
823  // while we pass NHWC data from FFmpeg to OpenVINO.
824  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
825  if (status != OK) {
826  if (status == NOT_FOUND) {
827  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set input layout as NHWC, "\
828  "all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
829  } else{
830  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
831  }
832  ret = DNN_GENERIC_ERROR;
833  goto err;
834  }
835  status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
836  if (status != OK) {
837  if (status == NOT_FOUND) {
838  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set output layout as NHWC, "\
839  "all output(s) are: \"%s\"\n", output_name, ov_model->all_output_names);
840  } else{
841  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
842  }
843  ret = DNN_GENERIC_ERROR;
844  goto err;
845  }
846  ov_model->nb_outputs = 1;
847 
848  // All models in the OpenVINO Open Model Zoo use BGR with range [0.0f, 255.0f] as input;
849  // we don't have an AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
850  // ask OpenVINO to do the conversion internally.
851  // The currently supported SR model (frame processing) is generated from a TensorFlow model,
852  // and its input is the Y channel as float with range [0.0f, 1.0f], so do not set it for this case.
853  // TODO: we need a final, clear & general solution with all backends/formats considered.
854  if (ov_model->model.func_type != DFT_PROCESS_FRAME) {
855  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
856  if (status != OK) {
857  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
858  ret = DNN_GENERIC_ERROR;
859  goto err;
860  }
861  }
862 
863  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->device, &config, &ov_model->exe_network);
864  if (status != OK) {
865  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
866  status = ie_core_get_available_devices(ov_model->core, &a_dev);
867  if (status != OK) {
868  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
869  ret = DNN_GENERIC_ERROR;
870  goto err;
871  }
872  for (int i = 0; i < a_dev.num_devices; i++) {
873  APPEND_STRING(all_dev_names, a_dev.devices[i])
874  }
875  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
876  ctx->device, all_dev_names);
877  ret = AVERROR(ENODEV);
878  goto err;
879  }
880 #endif
881  // create infer_requests for async execution
882  if (ctx->nireq <= 0) {
883  // the default value is a rough estimation
884  ctx->nireq = av_cpu_count() / 2 + 1;
885  }
886 
887  ov_model->request_queue = ff_safe_queue_create();
888  if (!ov_model->request_queue) {
889  ret = AVERROR(ENOMEM);
890  goto err;
891  }
892 
893  for (int i = 0; i < ctx->nireq; i++) {
894  OVRequestItem *item = av_mallocz(sizeof(*item));
895  if (!item) {
896  ret = AVERROR(ENOMEM);
897  goto err;
898  }
899 
900 #if HAVE_OPENVINO2
901  item->callback.callback_func = infer_completion_callback;
902 #else
903  item->callback.completeCallBackFunc = infer_completion_callback;
904 #endif
905  item->callback.args = item;
906  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
907  av_freep(&item);
908  ret = AVERROR(ENOMEM);
909  goto err;
910  }
911 
912 #if HAVE_OPENVINO2
913  status = ov_compiled_model_create_infer_request(ov_model->compiled_model, &item->infer_request);
914  if (status != OK) {
915  av_log(ctx, AV_LOG_ERROR, "Failed to create an inference request object.\n");
916  goto err;
917  }
918 #else
919  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
920  if (status != OK) {
921  ret = DNN_GENERIC_ERROR;
922  goto err;
923  }
924 #endif
925 
926  item->lltasks = av_malloc_array(ctx->ov_option.batch_size, sizeof(*item->lltasks));
927  if (!item->lltasks) {
928  ret = AVERROR(ENOMEM);
929  goto err;
930  }
931  item->lltask_count = 0;
932  }
933 
934  ov_model->task_queue = ff_queue_create();
935  if (!ov_model->task_queue) {
936  ret = AVERROR(ENOMEM);
937  goto err;
938  }
939 
940  ov_model->lltask_queue = ff_queue_create();
941  if (!ov_model->lltask_queue) {
942  ret = AVERROR(ENOMEM);
943  goto err;
944  }
945 
946  return 0;
947 
948 err:
949 #if HAVE_OPENVINO2
950  if (output_tensor_info)
951  ov_preprocess_output_tensor_info_free(output_tensor_info);
952  if (ov_model->output_info)
953  ov_preprocess_output_info_free(ov_model->output_info);
954  if (NCHW_layout)
955  ov_layout_free(NCHW_layout);
956  if (NHWC_layout)
957  ov_layout_free(NHWC_layout);
958  if (input_model_info)
959  ov_preprocess_input_model_info_free(input_model_info);
960 #endif
961  return ret;
962 }
963 
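/**
 * Fill the request from lltask_queue and run it.  In async mode the request is
 * started with the completion callback attached and the function returns
 * immediately; the callback later pushes the request back onto request_queue.
 * In sync mode the inference runs inline and infer_completion_callback() is
 * called directly before returning.
 */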
964 static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
965 {
966 #if HAVE_OPENVINO2
967  ov_status_e status;
968 #else
969  IEStatusCode status;
970 #endif
971  LastLevelTaskItem *lltask;
972  int ret = 0;
973  TaskItem *task;
974  DnnContext *ctx;
975  OVModel *ov_model;
976 
977  if (ff_queue_size(inferenceq) == 0) {
978 #if HAVE_OPENVINO2
979  ov_infer_request_free(request->infer_request);
980 #else
981  ie_infer_request_free(&request->infer_request);
982 #endif
983  av_freep(&request);
984  return 0;
985  }
986 
987  lltask = ff_queue_peek_front(inferenceq);
988  task = lltask->task;
989  ov_model = task->model;
990  ctx = ov_model->ctx;
991 
992  ret = fill_model_input_ov(ov_model, request);
993  if (ret != 0) {
994  goto err;
995  }
996 
997 #if HAVE_OPENVINO2
998  if (task->async) {
999  status = ov_infer_request_set_callback(request->infer_request, &request->callback);
1000  if (status != OK) {
1001  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1002  ret = ov2_map_error(status, NULL);
1003  goto err;
1004  }
1005 
1006  status = ov_infer_request_start_async(request->infer_request);
1007  if (status != OK) {
1008  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1009  ret = ov2_map_error(status, NULL);
1010  goto err;
1011  }
1012  return 0;
1013  } else {
1014  status = ov_infer_request_infer(request->infer_request);
1015  if (status != OK) {
1016  av_log(NULL, AV_LOG_ERROR, "Failed to start synchronous model inference for OV2\n");
1017  ret = ov2_map_error(status, NULL);
1018  goto err;
1019  }
1020  infer_completion_callback(request);
1021  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1022  }
1023 #else
1024  if (task->async) {
1025  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1026  if (status != OK) {
1027  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1028  ret = DNN_GENERIC_ERROR;
1029  goto err;
1030  }
1031  status = ie_infer_request_infer_async(request->infer_request);
1032  if (status != OK) {
1033  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1034  ret = DNN_GENERIC_ERROR;
1035  goto err;
1036  }
1037  return 0;
1038  } else {
1039  status = ie_infer_request_infer(request->infer_request);
1040  if (status != OK) {
1041  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
1042  ret = DNN_GENERIC_ERROR;
1043  goto err;
1044  }
1045  infer_completion_callback(request);
1046  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1047  }
1048 #endif
1049 err:
1050  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
1051 #if HAVE_OPENVINO2
1052  ov_infer_request_free(request->infer_request);
1053 #else
1054  ie_infer_request_free(&request->infer_request);
1055 #endif
1056  av_freep(&request);
1057  }
1058  return ret;
1059 }
1060 
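/**
 * Query the model input port (by name when given) and report its data type,
 * layout and dimensions; when input_resizable is set, the width and height
 * entries are reported as -1 so the caller can choose the frame size.
 */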
1061 static int get_input_ov(DNNModel *model, DNNData *input, const char *input_name)
1062 {
1063  OVModel *ov_model = (OVModel *)model;
1064  DnnContext *ctx = ov_model->ctx;
1065  int input_resizable = ctx->ov_option.input_resizable;
1066 
1067 #if HAVE_OPENVINO2
1068  ov_shape_t input_shape = {0};
1069  ov_element_type_e precision;
1070  ov_status_e status;
1071  if (input_name)
1072  status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
1073  else
1074  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
1075  if (status != OK) {
1076  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
1077  return ov2_map_error(status, NULL);
1078  }
1079  status = ov_port_get_element_type(ov_model->input_port, &precision);
1080  if (status != OK) {
1081  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
1082  return ov2_map_error(status, NULL);
1083  }
1084  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1085  if (status != OK) {
1086  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
1087  return ov2_map_error(status, NULL);
1088  }
1089  for (int i = 0; i < 4; i++)
1090  input->dims[i] = input_shape.dims[i];
1091  if (input_resizable) {
1092  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1093  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1094  }
1095 
1096  if (input_shape.dims[1] <= 3) // NCHW
1097  input->layout = DL_NCHW;
1098  else // NHWC
1099  input->layout = DL_NHWC;
1100 
1101  input->dt = precision_to_datatype(precision);
1102  ov_shape_free(&input_shape);
1103  return 0;
1104 #else
1105  char *model_input_name = NULL;
1106  IEStatusCode status;
1107  size_t model_input_count = 0;
1108  dimensions_t dims;
1109  precision_e precision;
1110  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
1111  if (status != OK) {
1112  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1113  return DNN_GENERIC_ERROR;
1114  }
1115  for (size_t i = 0; i < model_input_count; i++) {
1116  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
1117  if (status != OK) {
1118  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1119  return DNN_GENERIC_ERROR;
1120  }
1121  if (strcmp(model_input_name, input_name) == 0) {
1122  ie_network_name_free(&model_input_name);
1123  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
1124  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
1125  if (status != OK) {
1126  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
1127  return DNN_GENERIC_ERROR;
1128  }
1129 
1130  for (int i = 0; i < 4; i++)
1131  input->dims[i] = dims.dims[i];
1132  if (input_resizable) {
1133  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1134  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1135  }
1136 
1137  if (dims.dims[1] <= 3) // NCHW
1138  input->layout = DL_NCHW;
1139  else // NHWC
1140  input->layout = DL_NHWC;
1141 
1142  input->dt = precision_to_datatype(precision);
1143  return 0;
1144  }
1145 
1146  ie_network_name_free(&model_input_name);
1147  }
1148 
1149  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
1150  return AVERROR(EINVAL);
1151 #endif
1152 }
1153 
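/**
 * Check that the frame carries detection bounding-box side data and that every
 * bbox lies inside the frame and still has room for classification results;
 * returns 1 when the frame is usable for the classification path, 0 otherwise.
 */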
1154 static int contain_valid_detection_bbox(AVFrame *frame)
1155 {
1156  AVFrameSideData *sd;
1157  const AVDetectionBBoxHeader *header;
1158  const AVDetectionBBox *bbox;
1159 
1160  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
1161  if (!sd) { // this frame has nothing detected
1162  return 0;
1163  }
1164 
1165  if (!sd->size) {
1166  return 0;
1167  }
1168 
1169  header = (const AVDetectionBBoxHeader *)sd->data;
1170  if (!header->nb_bboxes) {
1171  return 0;
1172  }
1173 
1174  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1175  bbox = av_get_detection_bbox(header, i);
1176  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
1177  return 0;
1178  }
1179  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
1180  return 0;
1181  }
1182 
1183  if (bbox->classify_count == AV_NUM_DETECTION_BBOX_CLASSIFY) {
1184  return 0;
1185  }
1186  }
1187 
1188  return 1;
1189 }
1190 
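/**
 * Split a TaskItem into LastLevelTaskItems: one per task for frame processing
 * and detection, or one per (optionally target-filtered) detection bbox for
 * classification, and push them onto lltask_queue.
 */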
1191 static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
1192 {
1193  switch (func_type) {
1194  case DFT_PROCESS_FRAME:
1195  case DFT_ANALYTICS_DETECT:
1196  {
1197  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
1198  if (!lltask) {
1199  return AVERROR(ENOMEM);
1200  }
1201  task->inference_todo = 1;
1202  task->inference_done = 0;
1203  lltask->task = task;
1204  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1205  av_freep(&lltask);
1206  return AVERROR(ENOMEM);
1207  }
1208  return 0;
1209  }
1210  case DFT_ANALYTICS_CLASSIFY:
1211  {
1212  const AVDetectionBBoxHeader *header;
1213  AVFrame *frame = task->in_frame;
1214  AVFrameSideData *sd;
1215  DNNExecClassificationParams *params = (DNNExecClassificationParams *)exec_params;
1216 
1217  task->inference_todo = 0;
1218  task->inference_done = 0;
1219 
1220  if (!contain_valid_detection_bbox(frame)) {
1221  return 0;
1222  }
1223 
1224  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
1225  header = (const AVDetectionBBoxHeader *)sd->data;
1226 
1227  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1228  LastLevelTaskItem *lltask;
1229  const AVDetectionBBox *bbox = av_get_detection_bbox(header, i);
1230 
1231  if (params->target) {
1232  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
1233  continue;
1234  }
1235  }
1236 
1237  lltask = av_malloc(sizeof(*lltask));
1238  if (!lltask) {
1239  return AVERROR(ENOMEM);
1240  }
1241  task->inference_todo++;
1242  lltask->task = task;
1243  lltask->bbox_index = i;
1244  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1245  av_freep(&lltask);
1246  return AVERROR(ENOMEM);
1247  }
1248  }
1249  return 0;
1250  }
1251  default:
1252  av_assert0(!"should not reach here");
1253  return AVERROR(EINVAL);
1254  }
1255 }
1256 
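/**
 * Determine the output dimensions for a given input size by running a single
 * inference on an internally allocated frame, reshaping the model input first
 * when input_resizable is set.  Only valid for DFT_PROCESS_FRAME models.
 */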
1257 static int get_output_ov(DNNModel *model, const char *input_name, int input_width, int input_height,
1258  const char *output_name, int *output_width, int *output_height)
1259 {
1260 #if HAVE_OPENVINO2
1261  ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
1262  ov_status_e status;
1263  ov_shape_t input_shape = {0};
1264  ov_partial_shape_t partial_shape;
1265 #else
1266  IEStatusCode status;
1267  input_shapes_t input_shapes;
1268 #endif
1269  int ret;
1270  OVModel *ov_model = (OVModel *)model;
1271  DnnContext *ctx = ov_model->ctx;
1272  TaskItem task;
1273  OVRequestItem *request;
1274  DNNExecBaseParams exec_params = {
1275  .input_name = input_name,
1276  .output_names = output_name ? &output_name : NULL,
1277  .nb_output = 1,
1278  .in_frame = NULL,
1279  .out_frame = NULL,
1280  };
1281 
1282  if (ov_model->model.func_type != DFT_PROCESS_FRAME) {
1283  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
1284  return AVERROR(EINVAL);
1285  }
1286 
1287 #if HAVE_OPENVINO2
1288  if (ctx->ov_option.input_resizable) {
1289  status = ov_partial_shape_create(4, dims, &partial_shape);
1290  if (status != OK) {
1291  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape.\n");
1292  return ov2_map_error(status, NULL);
1293  }
1294  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1295  if (status != OK) {
1296  av_log(ctx, AV_LOG_ERROR, "Failed to create shape for model input resize.\n");
1297  return ov2_map_error(status, NULL);
1298  }
1299  input_shape.dims[2] = input_height;
1300  input_shape.dims[3] = input_width;
1301 
1302  status = ov_shape_to_partial_shape(input_shape, &partial_shape);
1303  ov_shape_free(&input_shape);
1304  if (status != OK) {
1305  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape for model input resize.\n");
1306  return ov2_map_error(status, NULL);
1307  }
1308 
1309  status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
1310  ov_partial_shape_free(&partial_shape);
1311  if (status != OK) {
1312  av_log(ctx, AV_LOG_ERROR, "Failed to resize model input.\n");
1313  return ov2_map_error(status, NULL);
1314  }
1315  }
1316 
1317  if (!ov_model->compiled_model) {
1318 #else
1319  if (ctx->ov_option.input_resizable) {
1320  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
1321  input_shapes.shapes->shape.dims[2] = input_height;
1322  input_shapes.shapes->shape.dims[3] = input_width;
1323  status |= ie_network_reshape(ov_model->network, input_shapes);
1324  ie_network_input_shapes_free(&input_shapes);
1325  if (status != OK) {
1326  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
1327  return DNN_GENERIC_ERROR;
1328  }
1329  }
1330  if (!ov_model->exe_network) {
1331 #endif
1332  ret = init_model_ov(ov_model, input_name, output_name ? &output_name : NULL, 1);
1333  if (ret != 0) {
1334  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
1335  return ret;
1336  }
1337  }
1338 
1339  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
1340  if (ret != 0) {
1341  goto err;
1342  }
1343 
1344  ret = extract_lltask_from_task(ov_model->model.func_type, &task, ov_model->lltask_queue, NULL);
1345  if (ret != 0) {
1346  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1347  goto err;
1348  }
1349 
1350  request = ff_safe_queue_pop_front(ov_model->request_queue);
1351  if (!request) {
1352  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1353  ret = AVERROR(EINVAL);
1354  goto err;
1355  }
1356 
1357  ret = execute_model_ov(request, ov_model->lltask_queue);
1358  *output_width = task.out_frame->width;
1359  *output_height = task.out_frame->height;
1360 err:
1361  av_frame_free(&task.out_frame);
1362  av_frame_free(&task.in_frame);
1363  return ret;
1364 }
1365 
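/**
 * Create the OpenVINO core and read the model file.  Compiling the model and
 * creating the inference requests is deferred to init_model_ov(), which runs on
 * the first execute/get_output call once the input/output names are known.
 */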
1366 static DNNModel *dnn_load_model_ov(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
1367 {
1368  DNNModel *model = NULL;
1369  OVModel *ov_model = NULL;
1370 #if HAVE_OPENVINO2
1371  ov_core_t* core = NULL;
1372  ov_model_t* ovmodel = NULL;
1373  ov_status_e status;
1374 #else
1375  size_t node_count = 0;
1376  char *node_name = NULL;
1377  IEStatusCode status;
1378 #endif
1379 
1380  ov_model = av_mallocz(sizeof(OVModel));
1381  if (!ov_model)
1382  return NULL;
1383  ov_model->ctx = ctx;
1384  model = &ov_model->model;
1385 
1386 #if HAVE_OPENVINO2
1387  status = ov_core_create(&core);
1388  if (status != OK) {
1389  goto err;
1390  }
1391  ov_model->core = core;
1392 
1393  status = ov_core_read_model(core, ctx->model_filename, NULL, &ovmodel);
1394  if (status != OK) {
1395  ov_version_t ver;
1396  status = ov_get_openvino_version(&ver);
1397  av_log(NULL, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1398  "Please check if the model version matches the runtime OpenVINO Version:\n",
1399  ctx->model_filename);
1400  if (status == OK) {
1401  av_log(NULL, AV_LOG_ERROR, "BuildNumber: %s\n", ver.buildNumber);
1402  }
1403  ov_version_free(&ver);
1404  goto err;
1405  }
1406  ov_model->ov_model = ovmodel;
1407 #else
1408  ov_model->all_input_names = NULL;
1409  ov_model->all_output_names = NULL;
1410 
1411  status = ie_core_create("", &ov_model->core);
1412  if (status != OK)
1413  goto err;
1414 
1415  status = ie_core_read_network(ov_model->core, ctx->model_filename, NULL, &ov_model->network);
1416  if (status != OK) {
1417  ie_version_t ver;
1418  ver = ie_c_api_version();
1419  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1420  "Please check if the model version matches the runtime OpenVINO %s\n",
1421  ctx->model_filename, ver.api_version);
1422  ie_version_free(&ver);
1423  goto err;
1424  }
1425 
1426  //get all the input and output names
1427  status = ie_network_get_inputs_number(ov_model->network, &node_count);
1428  if (status != OK) {
1429  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1430  goto err;
1431  }
1432  for (size_t i = 0; i < node_count; i++) {
1433  status = ie_network_get_input_name(ov_model->network, i, &node_name);
1434  if (status != OK) {
1435  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1436  goto err;
1437  }
1438  APPEND_STRING(ov_model->all_input_names, node_name)
1439  ie_network_name_free(&node_name);
1440  }
1441  status = ie_network_get_outputs_number(ov_model->network, &node_count);
1442  if (status != OK) {
1443  av_log(ctx, AV_LOG_ERROR, "Failed to get output count\n");
1444  goto err;
1445  }
1446  for (size_t i = 0; i < node_count; i++) {
1447  status = ie_network_get_output_name(ov_model->network, i, &node_name);
1448  if (status != OK) {
1449  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d output's name\n", (int)i);
1450  goto err;
1451  }
1452  APPEND_STRING(ov_model->all_output_names, node_name)
1453  ie_network_name_free(&node_name);
1454  }
1455 #endif
1456 
1457  model->get_input = &get_input_ov;
1458  model->get_output = &get_output_ov;
1459  model->filter_ctx = filter_ctx;
1460  model->func_type = func_type;
1461 
1462  return model;
1463 
1464 err:
1465  dnn_free_model_ov(&model);
1466  return NULL;
1467 }
1468 
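/**
 * Queue one execution request.  In async mode, inferences are launched as soon
 * as lltask_queue holds at least batch_size entries; in sync mode a single
 * request is executed immediately (classification and batch_size > 1 are not
 * supported synchronously).
 */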
1469 static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
1470 {
1471  OVModel *ov_model = (OVModel *)model;
1472  DnnContext *ctx = ov_model->ctx;
1473  OVRequestItem *request;
1474  TaskItem *task;
1475  int ret;
1476 
1477  ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
1478  if (ret != 0) {
1479  return ret;
1480  }
1481 
1482 #if HAVE_OPENVINO2
1483  if (!ov_model->compiled_model) {
1484 #else
1485  if (!ov_model->exe_network) {
1486 #endif
1487  ret = init_model_ov(ov_model, exec_params->input_name,
1488  exec_params->output_names, exec_params->nb_output);
1489  if (ret != 0) {
1490  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
1491  return ret;
1492  }
1493  }
1494 
1495  task = av_malloc(sizeof(*task));
1496  if (!task) {
1497  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
1498  return AVERROR(ENOMEM);
1499  }
1500 
1501  ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->async, 1);
1502  if (ret != 0) {
1503  av_freep(&task);
1504  return ret;
1505  }
1506 
1507  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
1508  av_freep(&task);
1509  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
1510  return AVERROR(ENOMEM);
1511  }
1512 
1513  ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
1514  if (ret != 0) {
1515  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1516  return ret;
1517  }
1518 
1519  if (ctx->async) {
1520  while (ff_queue_size(ov_model->lltask_queue) >= ctx->ov_option.batch_size) {
1521  request = ff_safe_queue_pop_front(ov_model->request_queue);
1522  if (!request) {
1523  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1524  return AVERROR(EINVAL);
1525  }
1526 
1527  ret = execute_model_ov(request, ov_model->lltask_queue);
1528  if (ret != 0) {
1529  return ret;
1530  }
1531  }
1532 
1533  return 0;
1534  }
1535  else {
1536  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
1537  // Classification filter has not been completely
1538  // tested with the sync mode, so it is not supported for now.
1539  avpriv_report_missing_feature(ctx, "classify for sync execution");
1540  return AVERROR(ENOSYS);
1541  }
1542 
1543  if (ctx->ov_option.batch_size > 1) {
1544  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
1545  return AVERROR(ENOSYS);
1546  }
1547 
1548  request = ff_safe_queue_pop_front(ov_model->request_queue);
1549  if (!request) {
1550  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1551  return AVERROR(EINVAL);
1552  }
1553  return execute_model_ov(request, ov_model->lltask_queue);
1554  }
1555 }
1556 
1557 static DNNAsyncStatusType dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
1558 {
1559  OVModel *ov_model = (OVModel *)model;
1560  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
1561 }
1562 
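/**
 * Flush any tasks still waiting in lltask_queue by starting one more (possibly
 * partially filled) inference request.
 */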
1563 static int dnn_flush_ov(const DNNModel *model)
1564 {
1565  OVModel *ov_model = (OVModel *)model;
1566  DnnContext *ctx = ov_model->ctx;
1567  OVRequestItem *request;
1568 #if HAVE_OPENVINO2
1569  ov_status_e status;
1570 #else
1571  IEStatusCode status;
1572 #endif
1573  int ret;
1574 
1575  if (ff_queue_size(ov_model->lltask_queue) == 0) {
1576  // no pending task need to flush
1577  return 0;
1578  }
1579 
1580  request = ff_safe_queue_pop_front(ov_model->request_queue);
1581  if (!request) {
1582  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1583  return AVERROR(EINVAL);
1584  }
1585 
1586  ret = fill_model_input_ov(ov_model, request);
1587  if (ret != 0) {
1588  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
1589  return ret;
1590  }
1591 #if HAVE_OPENVINO2
1592  status = ov_infer_request_infer(request->infer_request);
1593  if (status != OK) {
1594  av_log(ctx, AV_LOG_ERROR, "Failed to start sync inference for OV2\n");
1595  return ov2_map_error(status, NULL);
1596  }
1597 #else
1598  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1599  if (status != OK) {
1600  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1601  return DNN_GENERIC_ERROR;
1602  }
1603  status = ie_infer_request_infer_async(request->infer_request);
1604  if (status != OK) {
1605  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1606  return DNN_GENERIC_ERROR;
1607  }
1608 #endif
1609 
1610  return 0;
1611 }
1612 
1613 const DNNModule ff_dnn_backend_openvino = {
1614  .clazz = DNN_DEFINE_CLASS(dnn_openvino),
1615  .type = DNN_OV,
1616  .load_model = dnn_load_model_ov,
1617  .execute_model = dnn_execute_model_ov,
1618  .get_result = dnn_get_result_ov,
1619  .flush = dnn_flush_ov,
1620  .free_model = dnn_free_model_ov,
1621 };
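/*
 * ff_dnn_backend_openvino is the table the generic DNN interface uses to
 * dispatch to this backend when DNN_OV is selected; the function pointers above
 * map onto the load/execute/get_result/flush/free operations implemented in
 * this file.
 */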
OVModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_openvino.c:63
ff_dnn_backend_openvino
const DNNModule ff_dnn_backend_openvino
OVModel::input_info
ov_preprocess_input_info_t * input_info
Definition: dnn_backend_openvino.c:50
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:75
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:55
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:963
OVModel::nb_outputs
int nb_outputs
Definition: dnn_backend_openvino.c:64
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:57
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:56
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:30
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
LastLevelTaskItem
Definition: dnn_backend_common.h:57
LastLevelTaskItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:59
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
AVFrame::width
int width
Definition: frame.h:461
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:429
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
ov2_map_error
static int ov2_map_error(ov_status_e status, const char **desc)
Definition: dnn_backend_openvino.c:124
data
const char data[16]
Definition: mxf.c:149
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:55
FLAGS
#define FLAGS
Definition: cmdutils.c:595
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:81
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:43
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:42
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
init_model_ov
static int init_model_ov(OVModel *ov_model, const char *input_name, const char **output_names, int nb_outputs)
Definition: dnn_backend_openvino.c:577
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
dnn_get_width_idx_by_layout
static int dnn_get_width_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:197
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
TaskItem::model
void * model
Definition: dnn_backend_common.h:44
OVModel::output_info
ov_preprocess_output_info_t * output_info
Definition: dnn_backend_openvino.c:52
DnnContext
Definition: dnn_interface.h:143
OVRequestItem::infer_request
ov_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:72
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:52
OVModel::output_ports
ov_output_const_port_t ** output_ports
Definition: dnn_backend_openvino.c:51
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:66
Queue
Linear double-ended data structure.
Definition: executor.c:51
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
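av_get_detection_bbox() indexes into the variable-size array of boxes that follows an AVDetectionBBoxHeader in the frame side data. A sketch of how the detect/classify paths walk those boxes; the logging helper is added only for illustration.

    /* Sketch only: walk the detection boxes attached to a frame.
     * Requires libavutil/detection_bbox.h and libavutil/frame.h. */
    static void log_bboxes(void *log_ctx, const AVFrame *frame)
    {
        const AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
        if (!sd)
            return;
        const AVDetectionBBoxHeader *hdr = (const AVDetectionBBoxHeader *)sd->data;
        for (unsigned i = 0; i < hdr->nb_bboxes; i++) {
            const AVDetectionBBox *bbox = av_get_detection_bbox(hdr, i);
            av_log(log_ctx, AV_LOG_VERBOSE, "bbox %u: %s at (%d,%d) %dx%d\n",
                   i, bbox->detect_label, bbox->x, bbox->y, bbox->w, bbox->h);
        }
    }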
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
AVFrameSideData::size
size_t size
Definition: frame.h:268
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
OVModel::model
DNNModel model
Definition: dnn_backend_openvino.c:43
float
float
Definition: af_crystalizer.c:122
desc
const char * desc
Definition: dnn_backend_openvino.c:102
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:58
ov2_errors
static const struct @311 ov2_errors[]
DNNExecClassificationParams
Definition: dnn_interface.h:88
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:69
DNNModule::clazz
const AVClass clazz
Definition: dnn_interface.h:176
execute_model_ov
static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:964
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:156
DNNModel::get_output
int(* get_output)(struct DNNModel *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
ctx
AVFormatContext * ctx
Definition: movenc.c:49
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:52
DL_NCHW
@ DL_NCHW
Definition: dnn_interface.h:65
dnn_free_model_ov
static void dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:512
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:76
OVModel::preprocess
ov_preprocess_prepostprocessor_t * preprocess
Definition: dnn_backend_openvino.c:53
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:37
if
if(ret)
Definition: filter_design.txt:179
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:182
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:90
OVModel::all_input_names
const char * all_input_names
Definition: dnn_backend_openvino.c:58
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:423
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:70
av_err
int av_err
Definition: dnn_backend_openvino.c:101
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:56
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:45
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:69
OVRequestItem
Definition: dnn_backend_openvino.c:68
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:221
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:217
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:162
f
f
Definition: af_crystalizer.c:122
OVModel::compiled_model
ov_compiled_model_t * compiled_model
Definition: dnn_backend_openvino.c:48
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:53
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:41
OVModel::all_output_names
const char * all_output_names
Definition: dnn_backend_openvino.c:59
header
static const uint8_t header[24]
Definition: sdr2.c:68
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:41
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:50
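ff_dnn_fill_task() copies the execution parameters (frames, input/output names, async flag) into a TaskItem that the backend then queues. A sketch of that step under the usual conventions; the queue_task helper is hypothetical and error handling is condensed.

    /* Sketch only: fill a heap-allocated TaskItem and queue it for execution. */
    static int queue_task(OVModel *ov_model, DNNExecBaseParams *exec_params)
    {
        int ret;
        TaskItem *task = av_malloc(sizeof(*task));
        if (!task)
            return AVERROR(ENOMEM);
        ret = ff_dnn_fill_task(task, exec_params, ov_model, 1 /* async */, 1 /* do_ioproc */);
        if (ret != 0) {
            av_freep(&task);
            return ret;
        }
        if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
            av_freep(&task);
            return AVERROR(ENOMEM);
        }
        return 0;
    }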
input
Activation: test the status of the outputs and forward it to the corresponding inputs, returning FFERROR_NOT_READY when appropriate. If the filter stores internally one or a few frames for some input…
Definition: filter_design.txt:172
DNN_DEFINE_CLASS
#define DNN_DEFINE_CLASS(fname)
Definition: dnn_backend_common.h:39
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:1154
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Underlying C type is float.
Definition: opt.h:271
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
layout
In filter documentation, the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation covers, for each input and each output, the list of supported formats: for video that means pixel format; for audio that means channel layout…
Definition: filter_design.txt:18
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:86
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:334
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:59
OVModel::ov_model
ov_model_t * ov_model
Definition: dnn_backend_openvino.c:47
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
get_output_ov
static int get_output_ov(DNNModel *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:1257
OVModel::ctx
DnnContext * ctx
Definition: dnn_backend_openvino.c:44
OVModel::core
ov_core_t * core
Definition: dnn_backend_openvino.c:46
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:61
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:48
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:117
ret
ret
Definition: filter_design.txt:187
frame
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more, it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing, a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:41
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:46
OFFSET
From the AVOption how-to: it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it be. In the option table, name is the option name (keep it simple and lowercase), the description states what the option does (for example "set the foo of the bar"), and offset is the offset of the field in your local context; see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:62
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:60
AVFrame::height
int height
Definition: frame.h:461
extract_lltask_from_task
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:1191
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
get_input_ov
static int get_input_ov(DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:1061
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height, defining the bounding box.
Definition: detection_bbox.h:31
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:136
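ff_dnn_get_result_common() is what callers use after asynchronous execution to retrieve a finished frame pair from the task queue. A sketch of the polling pattern, assuming the DAST_* values of DNNAsyncStatusType declared in dnn_interface.h; the poll_result helper itself is hypothetical.

    /* Sketch only: poll the task queue for a finished inference. */
    static DNNAsyncStatusType poll_result(OVModel *ov_model, AVFrame **in, AVFrame **out)
    {
        DNNAsyncStatusType st = ff_dnn_get_result_common(ov_model->task_queue, in, out);
        if (st == DAST_SUCCESS) {
            /* *out is ready to push downstream; *in can be freed or reused */
        } else if (st == DAST_NOT_READY) {
            /* inference still in flight: the caller should poll again later */
        } /* DAST_EMPTY_QUEUE or DAST_FAIL: nothing to return */
        return st;
    }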
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:866
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:82
DL_NONE
@ DL_NONE
Definition: dnn_interface.h:64
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
DNNModel
Definition: dnn_interface.h:97
precision_to_datatype
static DNNDataType precision_to_datatype(ov_element_type_e precision)
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:141
mem.h
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:202
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:47
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
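AV_NUM_DETECTION_BBOX_CLASSIFY caps how many classification results can be attached to one detected box. A sketch of how a classify post-process might respect that cap, assuming the classify_labels/classify_count fields declared in libavutil/detection_bbox.h; the attach_label helper and the 'label' string are hypothetical.

    /* Sketch only: append one classification label to a bounding box, respecting the cap. */
    static void attach_label(AVDetectionBBox *bbox, const char *label)
    {
        if (bbox->classify_count >= AV_NUM_DETECTION_BBOX_CLASSIFY)
            return; /* no room left on this bounding box */
        av_strlcpy(bbox->classify_labels[bbox->classify_count], label,
                   sizeof(bbox->classify_labels[bbox->classify_count]));
        bbox->classify_count++;
    }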
ff_frame_to_dnn_classify
int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:340
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
DNNExecBaseParams
Definition: dnn_interface.h:80
DNNModel::get_input
int(* get_input)(struct DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
OVModel::input_port
ov_output_const_port_t * input_port
Definition: dnn_backend_openvino.c:49
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:50
OVRequestItem::callback
ov_callback_t callback
Definition: dnn_backend_openvino.c:73
avstring.h
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:45
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:49
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:58
detection_bbox.h
fill_model_input_ov
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:176
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:194
DNNModule
Definition: dnn_interface.h:175
DNNExecBaseParams::nb_output
uint32_t nb_output
Definition: dnn_interface.h:83
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:42
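Finally, ff_proc_from_frame_to_dnn() and ff_proc_from_dnn_to_frame() are the two halves of the I/O processing used for DFT_PROCESS_FRAME tasks. A sketch of the round trip; the run_one_frame helper is hypothetical, and filling of the DNNData descriptors as well as the actual inference call are elided.

    /* Sketch only: copy the input frame into the model's input buffer,
     * run inference, then copy the result back into the output frame. */
    static int run_one_frame(TaskItem *task, DNNData *input, DNNData *output, void *log_ctx)
    {
        int ret = ff_proc_from_frame_to_dnn(task->in_frame, input, log_ctx);
        if (ret < 0)
            return ret;
        /* ... start the inference request and wait for completion ... */
        return ff_proc_from_dnn_to_frame(task->out_frame, output, log_ctx);
    }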