FFmpeg
dnn_backend_openvino.c
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_io_proc.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/avstring.h"
33 #include "libavutil/detection_bbox.h"
34 #include "../internal.h"
35 #include "safe_queue.h"
36 #if HAVE_OPENVINO2
37 #include <openvino/c/openvino.h>
38 #else
39 #include <c_api/ie_c_api.h>
40 #endif
41 #include "dnn_backend_common.h"
42 
43 typedef struct OVModel{
44  DNNModel model;
45  DnnContext *ctx;
46 #if HAVE_OPENVINO2
47  ov_core_t *core;
48  ov_model_t *ov_model;
49  ov_compiled_model_t *compiled_model;
50  ov_output_const_port_t* input_port;
51  ov_preprocess_input_info_t* input_info;
52  ov_output_const_port_t** output_ports;
53  ov_preprocess_output_info_t* output_info;
54  ov_preprocess_prepostprocessor_t* preprocess;
55 #else
56  ie_core_t *core;
57  ie_network_t *network;
58  ie_executable_network_t *exe_network;
59  const char *all_input_names;
60  const char *all_output_names;
61 #endif
62  SafeQueue *request_queue; // holds OVRequestItem
63  Queue *task_queue; // holds TaskItem
64  Queue *lltask_queue; // holds LastLevelTaskItem
65  int nb_outputs;
66 } OVModel;
67 
68 // one request for one call to openvino
69 typedef struct OVRequestItem {
70  LastLevelTaskItem **lltasks;
71  uint32_t lltask_count;
72 #if HAVE_OPENVINO2
73  ov_infer_request_t *infer_request;
74  ov_callback_t callback;
75 #else
76  ie_complete_call_back_t callback;
77  ie_infer_request_t *infer_request;
78 #endif
79 } OVRequestItem;
80 
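/*
 * Relationship between the queues declared in OVModel above: request_queue
 * holds reusable OVRequestItem objects (one per in-flight call into
 * OpenVINO), task_queue holds one TaskItem per execute call, and
 * lltask_queue holds the per-inference LastLevelTaskItem entries that get
 * batched into a request in fill_model_input_ov().
 */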
81 #define APPEND_STRING(generated_string, iterate_string) \
82  generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
83  av_asprintf("%s", iterate_string);
84 
85 #define OFFSET(x) offsetof(OVOptions, x)
86 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
87 static const AVOption dnn_openvino_options[] = {
88  { "batch_size", "batch size per request", OFFSET(batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
89  { "input_resizable", "can input be resizable or not", OFFSET(input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
90  { "layout", "input layout of model", OFFSET(layout), AV_OPT_TYPE_INT, { .i64 = DL_NONE}, DL_NONE, DL_NHWC, FLAGS, .unit = "layout" },
91  { "none", "none", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NONE }, 0, 0, FLAGS, .unit = "layout"},
92  { "nchw", "nchw", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NCHW }, 0, 0, FLAGS, .unit = "layout"},
93  { "nhwc", "nhwc", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NHWC }, 0, 0, FLAGS, .unit = "layout"},
94  { "scale", "Add scale preprocess operation. Divide each element of input by specified value.", OFFSET(scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
95  { "mean", "Add mean preprocess operation. Subtract specified value from each element of input.", OFFSET(mean), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
96  { NULL }
97 };
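/*
 * The scale/mean options map to the OpenVINO preprocess steps set up in
 * init_model_ov(): the input is converted to F32, then mean is subtracted
 * and the result is divided by scale, i.e. y = (x - mean) / scale.
 * As a worked example (values chosen only for illustration): with the
 * frame-processing defaults mean = 0 and scale = 255, a U8 pixel value of
 * 128 becomes (128 - 0) / 255 ~= 0.502.
 */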
98 
99 #if HAVE_OPENVINO2
100 static const struct {
101  ov_status_e status;
102  int av_err;
103  const char *desc;
104 } ov2_errors[] = {
105  { OK, 0, "success" },
106  { GENERAL_ERROR, AVERROR_EXTERNAL, "general error" },
107  { NOT_IMPLEMENTED, AVERROR(ENOSYS), "not implemented" },
108  { NETWORK_NOT_LOADED, AVERROR_EXTERNAL, "network not loaded" },
109  { PARAMETER_MISMATCH, AVERROR(EINVAL), "parameter mismatch" },
110  { NOT_FOUND, AVERROR_EXTERNAL, "not found" },
111  { OUT_OF_BOUNDS, AVERROR(EOVERFLOW), "out of bounds" },
112  { UNEXPECTED, AVERROR_EXTERNAL, "unexpected" },
113  { REQUEST_BUSY, AVERROR(EBUSY), "request busy" },
114  { RESULT_NOT_READY, AVERROR(EBUSY), "result not ready" },
115  { NOT_ALLOCATED, AVERROR(ENODATA), "not allocated" },
116  { INFER_NOT_STARTED, AVERROR_EXTERNAL, "infer not started" },
117  { NETWORK_NOT_READ, AVERROR_EXTERNAL, "network not read" },
118  { INFER_CANCELLED, AVERROR(ECANCELED), "infer cancelled" },
119  { INVALID_C_PARAM, AVERROR(EINVAL), "invalid C parameter" },
120  { UNKNOWN_C_ERROR, AVERROR_UNKNOWN, "unknown C error" },
121  { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS), "not implement C method" },
122  { UNKNOW_EXCEPTION, AVERROR_UNKNOWN, "unknown exception" },
123 };
124 
125 static int ov2_map_error(ov_status_e status, const char **desc)
126 {
127  int i;
128  for (i = 0; i < FF_ARRAY_ELEMS(ov2_errors); i++) {
129  if (ov2_errors[i].status == status) {
130  if (desc)
131  *desc = ov2_errors[i].desc;
132  return ov2_errors[i].av_err;
133  }
134  }
135  if (desc)
136  *desc = "unknown error";
137  return AVERROR_UNKNOWN;
138 }
139 #endif
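/*
 * ov2_map_error() is the single place where OpenVINO C API status codes are
 * translated into AVERROR codes. A minimal usage sketch (this file mostly
 * passes NULL for desc; the desc variant is shown only for illustration):
 *
 *     const char *desc;
 *     int ret = ov2_map_error(status, &desc);
 *     if (ret < 0)
 *         av_log(ctx, AV_LOG_ERROR, "OpenVINO error: %s\n", desc);
 */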
140 
141 #if HAVE_OPENVINO2
142 static DNNDataType precision_to_datatype(ov_element_type_e precision)
143 #else
144 static DNNDataType precision_to_datatype(precision_e precision)
145 #endif
146 {
147  switch (precision)
148  {
149 #if HAVE_OPENVINO2
150  case F32:
151 #else
152  case FP32:
153 #endif
154  return DNN_FLOAT;
155  case U8:
156  return DNN_UINT8;
157  default:
158  av_assert0(!"not supported yet.");
159  return DNN_FLOAT;
160  }
161 }
162 
163 static int get_datatype_size(DNNDataType dt)
164 {
165  switch (dt)
166  {
167  case DNN_FLOAT:
168  return sizeof(float);
169  case DNN_UINT8:
170  return sizeof(uint8_t);
171  default:
172  av_assert0(!"not supported yet.");
173  return 1;
174  }
175 }
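/*
 * get_datatype_size() is used below to turn dims[] into byte strides.
 * For example (an illustrative shape, not taken from a real model): a
 * DNN_FLOAT tensor with dims = {1, 3, 224, 224} advances by
 * 3 * 224 * 224 * sizeof(float) = 602112 bytes per batch element, which is
 * exactly the pointer step applied in fill_model_input_ov() and
 * infer_completion_callback().
 */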
176 
177 static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
178 {
179  DNNData input;
180  LastLevelTaskItem *lltask;
181  TaskItem *task;
182  DnnContext *ctx = ov_model->ctx;
183 #if HAVE_OPENVINO2
184  int64_t* dims;
185  ov_status_e status;
186  ov_tensor_t* tensor = NULL;
187  ov_shape_t input_shape = {0};
188  ov_element_type_e precision;
189  char *port_name;
190 #else
191  dimensions_t dims;
192  precision_e precision;
193  ie_blob_buffer_t blob_buffer;
194  IEStatusCode status;
195  ie_blob_t *input_blob = NULL;
196 #endif
197 
198  memset(&input, 0, sizeof(input));
199  lltask = ff_queue_peek_front(ov_model->lltask_queue);
200  av_assert0(lltask);
201  task = lltask->task;
202 
203 #if HAVE_OPENVINO2
204  if (ov_model->input_port) {
205  ov_output_const_port_free(ov_model->input_port);
206  ov_model->input_port = NULL;
207  }
208  if (task->input_name)
209  status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
210  else
211  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
212  if (status != OK) {
213  av_log(ctx, AV_LOG_ERROR, "Failed to get input port.\n");
214  return ov2_map_error(status, NULL);
215  }
216  status = ov_port_get_any_name(ov_model->input_port, &port_name);
217  if (status != OK) {
218  av_log(ctx, AV_LOG_ERROR, "Failed to get input port name.\n");
219  return ov2_map_error(status, NULL);
220  }
221  av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model input: %s\n", port_name);
222  ov_free(port_name);
223  port_name = NULL;
224 
225  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
226  if (status != OK) {
227  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
228  return ov2_map_error(status, NULL);
229  }
230  dims = input_shape.dims;
231  status = ov_port_get_element_type(ov_model->input_port, &precision);
232  if (status != OK) {
233  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
234  ov_shape_free(&input_shape);
235  return ov2_map_error(status, NULL);
236  }
237  for (int i = 0; i < input_shape.rank; i++)
238  input.dims[i] = dims[i];
239  input.layout = DL_NHWC;
240  input.dt = precision_to_datatype(precision);
241 #else
242  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
243  if (status != OK) {
244  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
245  return DNN_GENERIC_ERROR;
246  }
247 
248  status |= ie_blob_get_dims(input_blob, &dims);
249  status |= ie_blob_get_precision(input_blob, &precision);
250  if (status != OK) {
251  ie_blob_free(&input_blob);
252  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
253  return DNN_GENERIC_ERROR;
254  }
255 
256  status = ie_blob_get_buffer(input_blob, &blob_buffer);
257  if (status != OK) {
258  ie_blob_free(&input_blob);
259  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
260  return DNN_GENERIC_ERROR;
261  }
262  for (int i = 0; i < 4; i++)
263  input.dims[i] = dims.dims[i];
264  input.layout = DL_NCHW;
265  input.data = blob_buffer.buffer;
266  input.dt = precision_to_datatype(precision);
267 #endif
268  // all models in the OpenVINO Open Model Zoo use BGR as input;
269  // make this an option when necessary.
270  input.order = DCO_BGR;
271  // We use preprocess_steps to scale input data, so disable scale and mean here.
272  input.scale = 1;
273  input.mean = 0;
274 
275  for (int i = 0; i < ctx->ov_option.batch_size; ++i) {
276  lltask = ff_queue_pop_front(ov_model->lltask_queue);
277  if (!lltask) {
278  break;
279  }
280  request->lltasks[i] = lltask;
281  request->lltask_count = i + 1;
282  task = lltask->task;
283 #if HAVE_OPENVINO2
284  if (tensor)
285  ov_tensor_free(tensor);
286  status = ov_tensor_create(precision, input_shape, &tensor);
287  ov_shape_free(&input_shape);
288  if (status != OK) {
289  av_log(ctx, AV_LOG_ERROR, "Failed to create tensor from host ptr.\n");
290  return ov2_map_error(status, NULL);
291  }
292  status = ov_tensor_data(tensor, &input.data);
293  if (status != OK) {
294  av_log(ctx, AV_LOG_ERROR, "Failed to get input data.\n");
295  return ov2_map_error(status, NULL);
296  }
297  status = ov_infer_request_set_input_tensor(request->infer_request, tensor);
298  if (status != OK) {
299  av_log(ctx, AV_LOG_ERROR, "Failed to set an input tensor for the model.\n");
300  return ov2_map_error(status, NULL);
301  }
302 #endif
303  switch (ov_model->model.func_type) {
304  case DFT_PROCESS_FRAME:
305  if (task->do_ioproc) {
306  if (ov_model->model.frame_pre_proc != NULL) {
307  ov_model->model.frame_pre_proc(task->in_frame, &input, ov_model->model.filter_ctx);
308  } else {
309  ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
310  }
311  }
312  break;
313  case DFT_ANALYTICS_DETECT:
314  ff_frame_to_dnn_detect(task->in_frame, &input, ctx);
315  break;
316  case DFT_ANALYTICS_CLASSIFY:
317  ff_frame_to_dnn_classify(task->in_frame, &input, lltask->bbox_index, ctx);
318  break;
319  default:
320  av_assert0(!"should not reach here");
321  break;
322  }
323  input.data = (uint8_t *)input.data +
324  input.dims[1] * input.dims[2] * input.dims[3] * get_datatype_size(input.dt);
325  }
326 #if HAVE_OPENVINO2
327  ov_tensor_free(tensor);
328 #else
329  ie_blob_free(&input_blob);
330 #endif
331 
332  return 0;
333 }
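/*
 * Summary of fill_model_input_ov() (OpenVINO 2 path; the legacy path maps an
 * ie_blob_t instead): it queries the model input port (name, shape, element
 * type), then pops up to batch_size LastLevelTaskItems from lltask_queue,
 * creates an input tensor of that shape and binds it to the infer request,
 * runs the per-function pre-processing (frame/detect/classify) into the
 * tensor memory, and advances input.data by one batch stride per item.
 */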
334 
335 static void infer_completion_callback(void *args)
336 {
337  OVRequestItem *request = args;
338  LastLevelTaskItem *lltask = request->lltasks[0];
339  TaskItem *task = lltask->task;
340  OVModel *ov_model = task->model;
341  SafeQueue *requestq = ov_model->request_queue;
342  DNNData *outputs;
343  DnnContext *ctx = ov_model->ctx;
344 #if HAVE_OPENVINO2
345  size_t* dims;
346  ov_status_e status;
347  ov_tensor_t *output_tensor;
348  ov_shape_t output_shape = {0};
349  ov_element_type_e precision;
350 
351  outputs = av_calloc(ov_model->nb_outputs, sizeof(*outputs));
352  if (!outputs) {
353  av_log(ctx, AV_LOG_ERROR, "Failed to alloc outputs.");
354  return;
355  }
356 
357  for (int i = 0; i < ov_model->nb_outputs; i++) {
358  status = ov_infer_request_get_tensor_by_const_port(request->infer_request,
359  ov_model->output_ports[i],
360  &output_tensor);
361  if (status != OK) {
362  av_log(ctx, AV_LOG_ERROR,
363  "Failed to get output tensor.");
364  goto end;
365  }
366 
367  status = ov_tensor_data(output_tensor, &outputs[i].data);
368  if (status != OK) {
369  av_log(ctx, AV_LOG_ERROR,
370  "Failed to get output data.");
371  goto end;
372  }
373 
374  status = ov_tensor_get_shape(output_tensor, &output_shape);
375  if (status != OK) {
376  av_log(ctx, AV_LOG_ERROR, "Failed to get output port shape.\n");
377  goto end;
378  }
379  dims = output_shape.dims;
380 
381  status = ov_port_get_element_type(ov_model->output_ports[i], &precision);
382  if (status != OK) {
383  av_log(ctx, AV_LOG_ERROR, "Failed to get output port data type.\n");
384  goto end;
385  }
386  outputs[i].dt = precision_to_datatype(precision);
387  outputs[i].layout = DL_NCHW;
388  outputs[i].dims[0] = 1;
389  outputs[i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
390  outputs[i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
391  outputs[i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
392  av_assert0(request->lltask_count <= dims[0]);
393  outputs[i].layout = ctx->ov_option.layout;
394  outputs[i].scale = ctx->ov_option.scale;
395  outputs[i].mean = ctx->ov_option.mean;
396  ov_shape_free(&output_shape);
397  ov_tensor_free(output_tensor);
398  output_tensor = NULL;
399  }
400 #else
401  IEStatusCode status;
402  dimensions_t dims;
403  ie_blob_t *output_blob = NULL;
404  ie_blob_buffer_t blob_buffer;
405  precision_e precision;
406  DNNData output;
407  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
408  if (status != OK) {
409  av_log(ctx, AV_LOG_ERROR,
410  "output \"%s\" may not be correct, all output(s) are: \"%s\"\n",
411  task->output_names[0], ov_model->all_output_names);
412  return;
413  }
414 
415  status = ie_blob_get_buffer(output_blob, &blob_buffer);
416  if (status != OK) {
417  ie_blob_free(&output_blob);
418  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
419  return;
420  }
421 
422  status |= ie_blob_get_dims(output_blob, &dims);
423  status |= ie_blob_get_precision(output_blob, &precision);
424  if (status != OK) {
425  ie_blob_free(&output_blob);
426  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
427  return;
428  }
429  output.data = blob_buffer.buffer;
430  output.layout = DL_NCHW;
431  for (int i = 0; i < 4; i++)
432  output.dims[i] = dims.dims[i];
433  av_assert0(request->lltask_count <= dims.dims[0]);
434  output.dt = precision_to_datatype(precision);
435  output.layout = ctx->ov_option.layout;
436  output.scale = ctx->ov_option.scale;
437  output.mean = ctx->ov_option.mean;
438  outputs = &output;
439 #endif
440 
441  av_assert0(request->lltask_count >= 1);
442  for (int i = 0; i < request->lltask_count; ++i) {
443  task = request->lltasks[i]->task;
444 
445  switch (ov_model->model.func_type) {
446  case DFT_PROCESS_FRAME:
447  if (task->do_ioproc) {
448  if (ov_model->model.frame_post_proc != NULL) {
449  ov_model->model.frame_post_proc(task->out_frame, outputs, ov_model->model.filter_ctx);
450  } else {
451  ff_proc_from_dnn_to_frame(task->out_frame, outputs, ctx);
452  }
453  } else {
454  task->out_frame->width =
455  outputs[0].dims[dnn_get_width_idx_by_layout(outputs[0].layout)];
456  task->out_frame->height =
457  outputs[0].dims[dnn_get_height_idx_by_layout(outputs[0].layout)];
458  }
459  break;
460  case DFT_ANALYTICS_DETECT:
461  if (!ov_model->model.detect_post_proc) {
462  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
463  goto end;
464  }
465  ov_model->model.detect_post_proc(task->in_frame, outputs,
466  ov_model->nb_outputs,
467  ov_model->model.filter_ctx);
468  break;
469  case DFT_ANALYTICS_CLASSIFY:
470  if (!ov_model->model.classify_post_proc) {
471  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
472  goto end;
473  }
474  for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
475  ov_model->model.classify_post_proc(task->in_frame, outputs,
476  request->lltasks[i]->bbox_index,
477  ov_model->model.filter_ctx);
478  break;
479  default:
480  av_assert0(!"should not reach here");
481  break;
482  }
483 
484  task->inference_done++;
485  av_freep(&request->lltasks[i]);
486  for (int i = 0; i < ov_model->nb_outputs; i++)
487  outputs[i].data = (uint8_t *)outputs[i].data +
488  outputs[i].dims[1] * outputs[i].dims[2] * outputs[i].dims[3] *
489  get_datatype_size(outputs[i].dt);
490  }
491 end:
492 #if HAVE_OPENVINO2
493  av_freep(&outputs);
494  ov_shape_free(&output_shape);
495  if (output_tensor)
496  ov_tensor_free(output_tensor);
497 #else
498  ie_blob_free(&output_blob);
499 #endif
500  request->lltask_count = 0;
501  if (ff_safe_queue_push_back(requestq, request) < 0) {
502 #if HAVE_OPENVINO2
503  ov_infer_request_free(request->infer_request);
504 #else
505  ie_infer_request_free(&request->infer_request);
506 #endif
507  av_freep(&request);
508  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
509  return;
510  }
511 }
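/*
 * Note on the output packing in infer_completion_callback(): each output is
 * exposed as a 4-D dims[] array padded with leading 1s, e.g. a rank-4 output
 * of shape [1, 3, 1080, 1920] keeps its dims, while a rank-2 output of shape
 * [200, 7] (an illustrative detection-style shape) becomes {1, 1, 200, 7}.
 * Once all post-processing is done, the OVRequestItem is pushed back onto
 * request_queue so it can be reused for the next batch.
 */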
512 
513 static void dnn_free_model_ov(DNNModel **model)
514 {
515  OVModel *ov_model;
516 
517  if (!model || !*model)
518  return;
519 
520  ov_model = (OVModel *)(*model);
521  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
522  OVRequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
523  if (item && item->infer_request) {
524 #if HAVE_OPENVINO2
525  ov_infer_request_free(item->infer_request);
526 #else
527  ie_infer_request_free(&item->infer_request);
528 #endif
529  }
530  av_freep(&item->lltasks);
531  av_freep(&item);
532  }
533  ff_safe_queue_destroy(ov_model->request_queue);
534 
535  while (ff_queue_size(ov_model->lltask_queue) != 0) {
536  LastLevelTaskItem *item = ff_queue_pop_front(ov_model->lltask_queue);
537  av_freep(&item);
538  }
539  ff_queue_destroy(ov_model->lltask_queue);
540 
541  while (ff_queue_size(ov_model->task_queue) != 0) {
542  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
543  av_frame_free(&item->in_frame);
544  av_frame_free(&item->out_frame);
545  av_freep(&item);
546  }
547  ff_queue_destroy(ov_model->task_queue);
548 #if HAVE_OPENVINO2
549  if (ov_model->input_port)
550  ov_output_const_port_free(ov_model->input_port);
551  for (int i = 0; i < ov_model->nb_outputs; i++)
552  if (ov_model->output_ports[i])
553  ov_output_const_port_free(ov_model->output_ports[i]);
554  av_freep(&ov_model->output_ports);
555  if (ov_model->preprocess)
556  ov_preprocess_prepostprocessor_free(ov_model->preprocess);
557  if (ov_model->compiled_model)
558  ov_compiled_model_free(ov_model->compiled_model);
559  if (ov_model->ov_model)
560  ov_model_free(ov_model->ov_model);
561  if (ov_model->core)
562  ov_core_free(ov_model->core);
563 #else
564  if (ov_model->exe_network)
565  ie_exec_network_free(&ov_model->exe_network);
566  if (ov_model->network)
567  ie_network_free(&ov_model->network);
568  if (ov_model->core)
569  ie_core_free(&ov_model->core);
570  av_free(ov_model->all_output_names);
571  av_free(ov_model->all_input_names);
572 #endif
573  av_freep(&ov_model);
574  *model = NULL;
575 }
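/*
 * dnn_free_model_ov() drains the three queues first (freeing each pending
 * infer request, lltask and task), then releases the OpenVINO objects
 * (ports, preprocessor, compiled model, model, core) and finally the
 * OVModel itself.
 */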
576 
577 
578 static int init_model_ov(OVModel *ov_model, const char *input_name, const char **output_names, int nb_outputs)
579 {
580  int ret = 0;
581  DnnContext *ctx = ov_model->ctx;
582 #if HAVE_OPENVINO2
583  ov_status_e status;
584  ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
585  ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
586  ov_preprocess_input_model_info_t* input_model_info = NULL;
587  ov_model_t *tmp_ov_model;
588  ov_layout_t* NHWC_layout = NULL;
589  ov_layout_t* NCHW_layout = NULL;
590  const char* NHWC_desc = "NHWC";
591  const char* NCHW_desc = "NCHW";
592  const char* device = ctx->device ? ctx->device : "CPU";
593 #else
594  IEStatusCode status;
595  ie_available_devices_t a_dev;
596  ie_config_t config = {NULL, NULL, NULL};
597  char *all_dev_names = NULL;
598 #endif
599  // We scale pixels by default when doing frame processing.
600  if (fabsf(ctx->ov_option.scale) < 1e-6f)
601  ctx->ov_option.scale = ov_model->model.func_type == DFT_PROCESS_FRAME ? 255 : 1;
602  // batch size
603  if (ctx->ov_option.batch_size <= 0) {
604  ctx->ov_option.batch_size = 1;
605  }
606 #if HAVE_OPENVINO2
607  if (ctx->ov_option.batch_size > 1) {
608  avpriv_report_missing_feature(ctx, "Do not support batch_size > 1 for now,"
609  "change batch_size to 1.\n");
610  ctx->ov_option.batch_size = 1;
611  }
612 
613  status = ov_preprocess_prepostprocessor_create(ov_model->ov_model, &ov_model->preprocess);
614  if (status != OK) {
615  av_log(ctx, AV_LOG_ERROR, "Failed to create preprocess for ov_model.\n");
616  ret = ov2_map_error(status, NULL);
617  goto err;
618  }
619 
620  if (input_name)
621  status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
622  else
623  status = ov_preprocess_prepostprocessor_get_input_info(ov_model->preprocess, &ov_model->input_info);
624  if (status != OK) {
625  av_log(ctx, AV_LOG_ERROR, "Failed to get input info from preprocess.\n");
626  ret = ov2_map_error(status, NULL);
627  goto err;
628  }
629 
630  status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
631  if (status != OK) {
632  av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input.\n");
633  ret = ov2_map_error(status, NULL);
634  goto err;
635  }
636 
637  //set input layout
638  status = ov_layout_create(NHWC_desc, &NHWC_layout);
639  status |= ov_layout_create(NCHW_desc, &NCHW_layout);
640  if (status != OK) {
641  av_log(ctx, AV_LOG_ERROR, "Failed to create layout for input.\n");
642  ret = ov2_map_error(status, NULL);
643  goto err;
644  }
645 
646  status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
647  if (status != OK) {
648  av_log(ctx, AV_LOG_ERROR, "Failed to set input tensor layout\n");
649  ret = ov2_map_error(status, NULL);
650  goto err;
651  }
652 
653  status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);
654  if (status != OK) {
655  av_log(ctx, AV_LOG_ERROR, "Failed to get input model info\n");
656  ret = ov2_map_error(status, NULL);
657  goto err;
658  }
659  if (ctx->ov_option.layout == DL_NCHW)
660  status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
661  else if (ctx->ov_option.layout == DL_NHWC)
662  status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
663  if (status != OK) {
664  av_log(ctx, AV_LOG_ERROR, "Failed to set input model layout\n");
665  ret = ov2_map_error(status, NULL);
666  goto err;
667  }
668 
669  status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
670  if (status != OK) {
671  av_log(ctx, AV_LOG_ERROR, "Failed to set input element type\n");
672  ret = ov2_map_error(status, NULL);
673  goto err;
674  }
675 
676  if (!nb_outputs) {
677  size_t output_size;
678  status = ov_model_outputs_size(ov_model->ov_model, &output_size);
679  if (status != OK) {
680  av_log(ctx, AV_LOG_ERROR, "Failed to get output size.\n");
681  ret = ov2_map_error(status, NULL);
682  goto err;
683  }
684  nb_outputs = output_size;
685  }
686  ov_model->nb_outputs = nb_outputs;
687  for (int i = 0; i < nb_outputs; i++) {
688  if (output_names)
689  status = ov_preprocess_prepostprocessor_get_output_info_by_name(
690  ov_model->preprocess, output_names[i], &ov_model->output_info);
691  else
692  status = ov_preprocess_prepostprocessor_get_output_info_by_index(
693  ov_model->preprocess, i, &ov_model->output_info);
694  if (status != OK) {
695  av_log(ctx, AV_LOG_ERROR, "Failed to get output info from preprocess.\n");
696  ret = ov2_map_error(status, NULL);
697  goto err;
698  }
699  status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
700  if (status != OK) {
701  av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input/output.\n");
702  ret = ov2_map_error(status, NULL);
703  goto err;
704  }
705  if (ov_model->model.func_type != DFT_PROCESS_FRAME)
706  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
707  else if (fabsf(ctx->ov_option.scale - 1) > 1e-6f || fabsf(ctx->ov_option.mean) > 1e-6f)
708  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
709  else
710  status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
711  if (status != OK) {
712  av_log(ctx, AV_LOG_ERROR, "Failed to set output element type\n");
713  ret = ov2_map_error(status, NULL);
714  goto err;
715  }
716  ov_preprocess_output_tensor_info_free(output_tensor_info);
717  output_tensor_info = NULL;
718  ov_preprocess_output_info_free(ov_model->output_info);
719  ov_model->output_info = NULL;
720  }
721  // set preprocess steps.
722  if (fabsf(ctx->ov_option.scale - 1) > 1e-6f || fabsf(ctx->ov_option.mean) > 1e-6f) {
723  ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
724  status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);
725  if (status != OK) {
726  av_log(ctx, AV_LOG_ERROR, "Failed to get preprocess steps\n");
727  ret = ov2_map_error(status, NULL);
728  goto err;
729  }
730  status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
731  status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->ov_option.mean);
732  status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->ov_option.scale);
733  if (status != OK) {
734  av_log(ctx, AV_LOG_ERROR, "Failed to set preprocess steps\n");
735  ov_preprocess_preprocess_steps_free(input_process_steps);
736  input_process_steps = NULL;
737  ret = ov2_map_error(status, NULL);
738  goto err;
739  }
740  ov_preprocess_preprocess_steps_free(input_process_steps);
741  input_process_steps = NULL;
742  }
743  ov_preprocess_input_tensor_info_free(input_tensor_info);
744  input_tensor_info = NULL;
745  ov_preprocess_input_info_free(ov_model->input_info);
746  ov_model->input_info = NULL;
747 
748  //update model
749  if(ov_model->ov_model)
750  tmp_ov_model = ov_model->ov_model;
751  status = ov_preprocess_prepostprocessor_build(ov_model->preprocess, &ov_model->ov_model);
752  if (status != OK) {
753  av_log(ctx, AV_LOG_ERROR, "Failed to update OV model\n");
754  ov_model_free(tmp_ov_model);
755  tmp_ov_model = NULL;
756  ret = ov2_map_error(status, NULL);
757  goto err;
758  }
759  ov_model_free(tmp_ov_model);
760 
761  //update output_port
762  if (!ov_model->output_ports) {
763  ov_model->output_ports = av_calloc(nb_outputs, sizeof(*ov_model->output_ports));
764  if (!ov_model->output_ports) {
765  ret = AVERROR(ENOMEM);
766  goto err;
767  }
768  } else
769  for (int i = 0; i < nb_outputs; i++) {
770  ov_output_const_port_free(ov_model->output_ports[i]);
771  ov_model->output_ports[i] = NULL;
772  }
773 
774  for (int i = 0; i < nb_outputs; i++) {
775  char *port_name;
776  if (output_names)
777  status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
778  &ov_model->output_ports[i]);
779  else
780  status = ov_model_const_output_by_index(ov_model->ov_model, i,
781  &ov_model->output_ports[i]);
782  if (status != OK) {
783  av_log(ctx, AV_LOG_ERROR, "Failed to get output port %s.\n", output_names[i]);
784  goto err;
785  }
786  status = ov_port_get_any_name(ov_model->output_ports[i], &port_name);
787  if (status != OK) {
788  av_log(ctx, AV_LOG_ERROR, "Failed to get output port name.\n");
789  goto err;
790  }
791  av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model outputs: %s\n", port_name);
792  ov_free(port_name);
793  port_name = NULL;
794  }
795  //compile network
796  status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
797  if (status != OK) {
798  ret = ov2_map_error(status, NULL);
799  goto err;
800  }
801  ov_preprocess_input_model_info_free(input_model_info);
802  input_model_info = NULL;
803  ov_layout_free(NCHW_layout);
804  ov_layout_free(NHWC_layout);
805 #else
806  if (ctx->ov_option.batch_size > 1) {
807  input_shapes_t input_shapes;
808  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
809  if (status != OK) {
810  ret = DNN_GENERIC_ERROR;
811  goto err;
812  }
813  for (int i = 0; i < input_shapes.shape_num; i++)
814  input_shapes.shapes[i].shape.dims[0] = ctx->ov_option.batch_size;
815  status = ie_network_reshape(ov_model->network, input_shapes);
816  ie_network_input_shapes_free(&input_shapes);
817  if (status != OK) {
818  ret = DNN_GENERIC_ERROR;
819  goto err;
820  }
821  }
822 
823  // The dim order in OpenVINO's legacy API is fixed: it is always NCHW for 4-D data,
824  // while FFmpeg passes NHWC data to OpenVINO.
825  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
826  if (status != OK) {
827  if (status == NOT_FOUND) {
828  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set input layout as NHWC, "\
829  "all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
830  } else{
831  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
832  }
833  ret = DNN_GENERIC_ERROR;
834  goto err;
835  }
836  status = ie_network_set_output_layout(ov_model->network, output_names[0], NHWC);
837  if (status != OK) {
838  if (status == NOT_FOUND) {
839  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set output layout as NHWC, "\
840  "all output(s) are: \"%s\"\n", output_names[0], ov_model->all_output_names);
841  } else {
842  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_names[0]);
843  }
844  ret = DNN_GENERIC_ERROR;
845  goto err;
846  }
847  ov_model->nb_outputs = 1;
848 
849  // all models in the OpenVINO Open Model Zoo use BGR with range [0.0f, 255.0f] as input;
850  // we don't have an AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
851  // ask OpenVINO to do the conversion internally.
852  // the currently supported SR model (frame processing) is generated from a TensorFlow model,
853  // and its input is the Y channel as float with range [0.0f, 1.0f], so do not set it for this case.
854  // TODO: we need a final, clear and general solution with all backends/formats considered.
855  if (ov_model->model.func_type != DFT_PROCESS_FRAME) {
856  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
857  if (status != OK) {
858  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
859  ret = DNN_GENERIC_ERROR;
860  goto err;
861  }
862  }
863 
864  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->device, &config, &ov_model->exe_network);
865  if (status != OK) {
866  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
867  status = ie_core_get_available_devices(ov_model->core, &a_dev);
868  if (status != OK) {
869  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
870  ret = DNN_GENERIC_ERROR;
871  goto err;
872  }
873  for (int i = 0; i < a_dev.num_devices; i++) {
874  APPEND_STRING(all_dev_names, a_dev.devices[i])
875  }
876  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
877  ctx->device, all_dev_names);
878  ret = AVERROR(ENODEV);
879  goto err;
880  }
881 #endif
882  // create infer_requests for async execution
883  if (ctx->nireq <= 0) {
884  // the default value is a rough estimation
885  ctx->nireq = av_cpu_count() / 2 + 1;
886  }
887 
888  ov_model->request_queue = ff_safe_queue_create();
889  if (!ov_model->request_queue) {
890  ret = AVERROR(ENOMEM);
891  goto err;
892  }
893 
894  for (int i = 0; i < ctx->nireq; i++) {
895  OVRequestItem *item = av_mallocz(sizeof(*item));
896  if (!item) {
897  ret = AVERROR(ENOMEM);
898  goto err;
899  }
900 
901 #if HAVE_OPENVINO2
902  item->callback.callback_func = infer_completion_callback;
903 #else
904  item->callback.completeCallBackFunc = infer_completion_callback;
905 #endif
906  item->callback.args = item;
907  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
908  av_freep(&item);
909  ret = AVERROR(ENOMEM);
910  goto err;
911  }
912 
913 #if HAVE_OPENVINO2
914  status = ov_compiled_model_create_infer_request(ov_model->compiled_model, &item->infer_request);
915  if (status != OK) {
916  av_log(ctx, AV_LOG_ERROR, "Failed to create an inference request object.\n");
917  goto err;
918  }
919 #else
920  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
921  if (status != OK) {
922  ret = DNN_GENERIC_ERROR;
923  goto err;
924  }
925 #endif
926 
927  item->lltasks = av_malloc_array(ctx->ov_option.batch_size, sizeof(*item->lltasks));
928  if (!item->lltasks) {
929  ret = AVERROR(ENOMEM);
930  goto err;
931  }
932  item->lltask_count = 0;
933  }
934 
935  ov_model->task_queue = ff_queue_create();
936  if (!ov_model->task_queue) {
937  ret = AVERROR(ENOMEM);
938  goto err;
939  }
940 
941  ov_model->lltask_queue = ff_queue_create();
942  if (!ov_model->lltask_queue) {
943  ret = AVERROR(ENOMEM);
944  goto err;
945  }
946 
947  return 0;
948 
949 err:
950 #if HAVE_OPENVINO2
951  if (output_tensor_info)
952  ov_preprocess_output_tensor_info_free(output_tensor_info);
953  if (ov_model->output_info)
954  ov_preprocess_output_info_free(ov_model->output_info);
955  if (NCHW_layout)
956  ov_layout_free(NCHW_layout);
957  if (NHWC_layout)
958  ov_layout_free(NHWC_layout);
959  if (input_model_info)
960  ov_preprocess_input_model_info_free(input_model_info);
961 #endif
962  return ret;
963 }
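/*
 * init_model_ov() (OpenVINO 2 path) wires everything up in this order:
 *   1. create an ov_preprocess_prepostprocessor for the loaded model;
 *   2. declare the input tensor as U8/NHWC and tell OpenVINO the model's own
 *      layout (the "layout" option) so the conversion is inserted for us;
 *   3. pick the output element type (F32 unless plain frame processing with
 *      no scale/mean);
 *   4. optionally add convert/mean/scale preprocess steps;
 *   5. rebuild the model, look up the output ports, compile it for the
 *      selected device and create nireq infer requests with the completion
 *      callback attached.
 */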
964 
965 static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
966 {
967 #if HAVE_OPENVINO2
968  ov_status_e status;
969 #else
970  IEStatusCode status;
971 #endif
972  LastLevelTaskItem *lltask;
973  int ret = 0;
974  TaskItem *task;
975  DnnContext *ctx;
976  OVModel *ov_model;
977 
978  if (ff_queue_size(inferenceq) == 0) {
979 #if HAVE_OPENVINO2
980  ov_infer_request_free(request->infer_request);
981 #else
982  ie_infer_request_free(&request->infer_request);
983 #endif
984  av_freep(&request);
985  return 0;
986  }
987 
988  lltask = ff_queue_peek_front(inferenceq);
989  task = lltask->task;
990  ov_model = task->model;
991  ctx = ov_model->ctx;
992 
993  ret = fill_model_input_ov(ov_model, request);
994  if (ret != 0) {
995  goto err;
996  }
997 
998 #if HAVE_OPENVINO2
999  if (task->async) {
1000  status = ov_infer_request_set_callback(request->infer_request, &request->callback);
1001  if (status != OK) {
1002  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1003  ret = ov2_map_error(status, NULL);
1004  goto err;
1005  }
1006 
1007  status = ov_infer_request_start_async(request->infer_request);
1008  if (status != OK) {
1009  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1010  ret = ov2_map_error(status, NULL);
1011  goto err;
1012  }
1013  return 0;
1014  } else {
1015  status = ov_infer_request_infer(request->infer_request);
1016  if (status != OK) {
1017  av_log(NULL, AV_LOG_ERROR, "Failed to start synchronous model inference for OV2\n");
1018  ret = ov2_map_error(status, NULL);
1019  goto err;
1020  }
1021  infer_completion_callback(request);
1022  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1023  }
1024 #else
1025  if (task->async) {
1026  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1027  if (status != OK) {
1028  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1029  ret = DNN_GENERIC_ERROR;
1030  goto err;
1031  }
1032  status = ie_infer_request_infer_async(request->infer_request);
1033  if (status != OK) {
1034  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1035  ret = DNN_GENERIC_ERROR;
1036  goto err;
1037  }
1038  return 0;
1039  } else {
1040  status = ie_infer_request_infer(request->infer_request);
1041  if (status != OK) {
1042  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
1043  ret = DNN_GENERIC_ERROR;
1044  goto err;
1045  }
1046  infer_completion_callback(request);
1047  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1048  }
1049 #endif
1050 err:
1051  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
1052 #if HAVE_OPENVINO2
1053  ov_infer_request_free(request->infer_request);
1054 #else
1055  ie_infer_request_free(&request->infer_request);
1056 #endif
1057  av_freep(&request);
1058  }
1059  return ret;
1060 }
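/*
 * execute_model_ov(): in async mode the completion callback is registered
 * and the request is started with ov_infer_request_start_async(); in sync
 * mode the request is run inline and infer_completion_callback() is invoked
 * directly, with DNN_GENERIC_ERROR returned if not every queued inference
 * completed.
 */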
1061 
1062 static int get_input_ov(DNNModel *model, DNNData *input, const char *input_name)
1063 {
1064  OVModel *ov_model = (OVModel *)model;
1065  DnnContext *ctx = ov_model->ctx;
1066  int input_resizable = ctx->ov_option.input_resizable;
1067 
1068 #if HAVE_OPENVINO2
1069  ov_shape_t input_shape = {0};
1070  ov_element_type_e precision;
1071  ov_status_e status;
1072  if (input_name)
1073  status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
1074  else
1075  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
1076  if (status != OK) {
1077  av_log(ctx, AV_LOG_ERROR, "Failed to get input port.\n");
1078  return ov2_map_error(status, NULL);
1079  }
1080  status = ov_port_get_element_type(ov_model->input_port, &precision);
1081  if (status != OK) {
1082  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
1083  return ov2_map_error(status, NULL);
1084  }
1085  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1086  if (status != OK) {
1087  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
1088  return ov2_map_error(status, NULL);
1089  }
1090  for (int i = 0; i < 4; i++)
1091  input->dims[i] = input_shape.dims[i];
1092  if (input_resizable) {
1093  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1094  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1095  }
1096 
1097  if (input_shape.dims[1] <= 3) // NCHW
1098  input->layout = DL_NCHW;
1099  else // NHWC
1100  input->layout = DL_NHWC;
1101 
1102  input->dt = precision_to_datatype(precision);
1103  ov_shape_free(&input_shape);
1104  return 0;
1105 #else
1106  char *model_input_name = NULL;
1107  IEStatusCode status;
1108  size_t model_input_count = 0;
1109  dimensions_t dims;
1110  precision_e precision;
1111  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
1112  if (status != OK) {
1113  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1114  return DNN_GENERIC_ERROR;
1115  }
1116  for (size_t i = 0; i < model_input_count; i++) {
1117  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
1118  if (status != OK) {
1119  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1120  return DNN_GENERIC_ERROR;
1121  }
1122  if (strcmp(model_input_name, input_name) == 0) {
1123  ie_network_name_free(&model_input_name);
1124  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
1125  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
1126  if (status != OK) {
1127  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
1128  return DNN_GENERIC_ERROR;
1129  }
1130 
1131  for (int i = 0; i < 4; i++)
1132  input->dims[i] = dims.dims[i];
1133  if (input_resizable) {
1134  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1135  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1136  }
1137 
1138  if (dims.dims[1] <= 3) // NCHW
1139  input->layout = DL_NCHW;
1140  else // NHWC
1141  input->layout = DL_NHWC;
1142 
1143  input->dt = precision_to_datatype(precision);
1144  return 0;
1145  }
1146 
1147  ie_network_name_free(&model_input_name);
1148  }
1149 
1150  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
1151  return AVERROR(EINVAL);
1152 #endif
1153 }
1154 
1155 static int contain_valid_detection_bbox(AVFrame *frame)
1156 {
1157  AVFrameSideData *sd;
1158  const AVDetectionBBoxHeader *header;
1159  const AVDetectionBBox *bbox;
1160 
1161  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
1162  if (!sd) { // this frame has nothing detected
1163  return 0;
1164  }
1165 
1166  if (!sd->size) {
1167  return 0;
1168  }
1169 
1170  header = (const AVDetectionBBoxHeader *)sd->data;
1171  if (!header->nb_bboxes) {
1172  return 0;
1173  }
1174 
1175  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1176  bbox = av_get_detection_bbox(header, i);
1177  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
1178  return 0;
1179  }
1180  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
1181  return 0;
1182  }
1183 
1184  if (bbox->classify_count == AV_NUM_DETECTION_BBOX_CLASSIFY) {
1185  return 0;
1186  }
1187  }
1188 
1189  return 1;
1190 }
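/*
 * contain_valid_detection_bbox() gates the classification path: a frame is
 * only classified if it carries detection bbox side data and every bbox lies
 * fully inside the frame; otherwise the frame is passed through untouched
 * (task->inference_todo stays 0 in the caller).
 */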
1191 
1192 static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
1193 {
1194  switch (func_type) {
1195  case DFT_PROCESS_FRAME:
1196  case DFT_ANALYTICS_DETECT:
1197  {
1198  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
1199  if (!lltask) {
1200  return AVERROR(ENOMEM);
1201  }
1202  task->inference_todo = 1;
1203  task->inference_done = 0;
1204  lltask->task = task;
1205  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1206  av_freep(&lltask);
1207  return AVERROR(ENOMEM);
1208  }
1209  return 0;
1210  }
1211  case DFT_ANALYTICS_CLASSIFY:
1212  {
1213  const AVDetectionBBoxHeader *header;
1214  AVFrame *frame = task->in_frame;
1215  AVFrameSideData *sd;
1216  DNNExecClassificationParams *params = (DNNExecClassificationParams *)exec_params;
1217 
1218  task->inference_todo = 0;
1219  task->inference_done = 0;
1220 
1221  if (!contain_valid_detection_bbox(frame)) {
1222  return 0;
1223  }
1224 
1225  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
1226  header = (const AVDetectionBBoxHeader *)sd->data;
1227 
1228  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1229  LastLevelTaskItem *lltask;
1230  const AVDetectionBBox *bbox = av_get_detection_bbox(header, i);
1231 
1232  if (params->target) {
1233  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
1234  continue;
1235  }
1236  }
1237 
1238  lltask = av_malloc(sizeof(*lltask));
1239  if (!lltask) {
1240  return AVERROR(ENOMEM);
1241  }
1242  task->inference_todo++;
1243  lltask->task = task;
1244  lltask->bbox_index = i;
1245  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1246  av_freep(&lltask);
1247  return AVERROR(ENOMEM);
1248  }
1249  }
1250  return 0;
1251  }
1252  default:
1253  av_assert0(!"should not reach here");
1254  return AVERROR(EINVAL);
1255  }
1256 }
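/*
 * extract_lltask_from_task(): frame processing and detection create exactly
 * one LastLevelTaskItem per task, while classification creates one per
 * detected bbox that matches the optional "target" label, so a single frame
 * can fan out into several inferences (task->inference_todo counts them).
 */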
1257 
1258 static int get_output_ov(DNNModel *model, const char *input_name, int input_width, int input_height,
1259  const char *output_name, int *output_width, int *output_height)
1260 {
1261 #if HAVE_OPENVINO2
1262  ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
1263  ov_status_e status;
1264  ov_shape_t input_shape = {0};
1265  ov_partial_shape_t partial_shape;
1266 #else
1267  IEStatusCode status;
1268  input_shapes_t input_shapes;
1269 #endif
1270  int ret;
1271  OVModel *ov_model = (OVModel *)model;
1272  DnnContext *ctx = ov_model->ctx;
1273  TaskItem task;
1274  OVRequestItem *request;
1275  DNNExecBaseParams exec_params = {
1276  .input_name = input_name,
1277  .output_names = output_name ? &output_name : NULL,
1278  .nb_output = 1,
1279  .in_frame = NULL,
1280  .out_frame = NULL,
1281  };
1282 
1283  if (ov_model->model.func_type != DFT_PROCESS_FRAME) {
1284  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
1285  return AVERROR(EINVAL);
1286  }
1287 
1288 #if HAVE_OPENVINO2
1289  if (ctx->ov_option.input_resizable) {
1290  status = ov_partial_shape_create(4, dims, &partial_shape);
1291  if (status != OK) {
1292  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape.\n");
1293  return ov2_map_error(status, NULL);
1294  }
1295  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1296  if (status != OK) {
1297  av_log(ctx, AV_LOG_ERROR, "Failed to create shape for model input resize.\n");
1298  return ov2_map_error(status, NULL);
1299  }
1300  input_shape.dims[2] = input_height;
1301  input_shape.dims[3] = input_width;
1302 
1303  status = ov_shape_to_partial_shape(input_shape, &partial_shape);
1304  ov_shape_free(&input_shape);
1305  if (status != OK) {
1306  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape for model input resize.\n");
1307  return ov2_map_error(status, NULL);
1308  }
1309 
1310  status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
1311  ov_partial_shape_free(&partial_shape);
1312  if (status != OK) {
1313  av_log(ctx, AV_LOG_ERROR, "Failed to resize model input.\n");
1314  return ov2_map_error(status, NULL);
1315  }
1316  }
1317 
1318  if (!ov_model->compiled_model) {
1319 #else
1320  if (ctx->ov_option.input_resizable) {
1321  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
1322  input_shapes.shapes->shape.dims[2] = input_height;
1323  input_shapes.shapes->shape.dims[3] = input_width;
1324  status |= ie_network_reshape(ov_model->network, input_shapes);
1325  ie_network_input_shapes_free(&input_shapes);
1326  if (status != OK) {
1327  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
1328  return DNN_GENERIC_ERROR;
1329  }
1330  }
1331  if (!ov_model->exe_network) {
1332 #endif
1333  ret = init_model_ov(ov_model, input_name, output_name ? &output_name : NULL, 1);
1334  if (ret != 0) {
1335  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
1336  return ret;
1337  }
1338  }
1339 
1340  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
1341  if (ret != 0) {
1342  goto err;
1343  }
1344 
1345  ret = extract_lltask_from_task(ov_model->model.func_type, &task, ov_model->lltask_queue, NULL);
1346  if (ret != 0) {
1347  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1348  goto err;
1349  }
1350 
1351  request = ff_safe_queue_pop_front(ov_model->request_queue);
1352  if (!request) {
1353  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1354  ret = AVERROR(EINVAL);
1355  goto err;
1356  }
1357 
1358  ret = execute_model_ov(request, ov_model->lltask_queue);
1359  *output_width = task.out_frame->width;
1360  *output_height = task.out_frame->height;
1361 err:
1362  av_frame_free(&task.out_frame);
1363  av_frame_free(&task.in_frame);
1364  return ret;
1365 }
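/*
 * get_output_ov() discovers the output size by actually running the model
 * once: ff_dnn_fill_gettingoutput_task() builds a dummy task with an
 * input_height x input_width frame, execute_model_ov() runs it synchronously,
 * and the resulting out_frame dimensions are reported back to the filter.
 */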
1366 
1367 static DNNModel *dnn_load_model_ov(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
1368 {
1369  DNNModel *model = NULL;
1370  OVModel *ov_model = NULL;
1371 #if HAVE_OPENVINO2
1372  ov_core_t* core = NULL;
1373  ov_model_t* ovmodel = NULL;
1374  ov_status_e status;
1375 #else
1376  size_t node_count = 0;
1377  char *node_name = NULL;
1378  IEStatusCode status;
1379 #endif
1380 
1381  ov_model = av_mallocz(sizeof(OVModel));
1382  if (!ov_model)
1383  return NULL;
1384  ov_model->ctx = ctx;
1385  model = &ov_model->model;
1386 
1387 #if HAVE_OPENVINO2
1388  status = ov_core_create(&core);
1389  if (status != OK) {
1390  goto err;
1391  }
1392  ov_model->core = core;
1393 
1394  status = ov_core_read_model(core, ctx->model_filename, NULL, &ovmodel);
1395  if (status != OK) {
1396  ov_version_t ver;
1397  status = ov_get_openvino_version(&ver);
1398  av_log(NULL, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1399  "Please check if the model version matches the runtime OpenVINO Version:\n",
1400  ctx->model_filename);
1401  if (status == OK) {
1402  av_log(NULL, AV_LOG_ERROR, "BuildNumber: %s\n", ver.buildNumber);
1403  }
1404  ov_version_free(&ver);
1405  goto err;
1406  }
1407  ov_model->ov_model = ovmodel;
1408 #else
1409  ov_model->all_input_names = NULL;
1410  ov_model->all_output_names = NULL;
1411 
1412  status = ie_core_create("", &ov_model->core);
1413  if (status != OK)
1414  goto err;
1415 
1416  status = ie_core_read_network(ov_model->core, ctx->model_filename, NULL, &ov_model->network);
1417  if (status != OK) {
1418  ie_version_t ver;
1419  ver = ie_c_api_version();
1420  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1421  "Please check if the model version matches the runtime OpenVINO %s\n",
1422  ctx->model_filename, ver.api_version);
1423  ie_version_free(&ver);
1424  goto err;
1425  }
1426 
1427  //get all the input and output names
1428  status = ie_network_get_inputs_number(ov_model->network, &node_count);
1429  if (status != OK) {
1430  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1431  goto err;
1432  }
1433  for (size_t i = 0; i < node_count; i++) {
1434  status = ie_network_get_input_name(ov_model->network, i, &node_name);
1435  if (status != OK) {
1436  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1437  goto err;
1438  }
1439  APPEND_STRING(ov_model->all_input_names, node_name)
1440  ie_network_name_free(&node_name);
1441  }
1442  status = ie_network_get_outputs_number(ov_model->network, &node_count);
1443  if (status != OK) {
1444  av_log(ctx, AV_LOG_ERROR, "Failed to get output count\n");
1445  goto err;
1446  }
1447  for (size_t i = 0; i < node_count; i++) {
1448  status = ie_network_get_output_name(ov_model->network, i, &node_name);
1449  if (status != OK) {
1450  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d output's name\n", (int)i);
1451  goto err;
1452  }
1453  APPEND_STRING(ov_model->all_output_names, node_name)
1454  ie_network_name_free(&node_name);
1455  }
1456 #endif
1457 
1458  model->get_input = &get_input_ov;
1459  model->get_output = &get_output_ov;
1460  model->filter_ctx = filter_ctx;
1461  model->func_type = func_type;
1462 
1463  return model;
1464 
1465 err:
1466  dnn_free_model_ov(&model);
1467  return NULL;
1468 }
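/*
 * dnn_load_model_ov() only creates the core and reads the model (or, for the
 * legacy API, collects the input/output names); compiling for the target
 * device is deferred to init_model_ov(), which runs on the first execute or
 * get_output call once the I/O names and resize options are known.
 */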
1469 
1470 static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
1471 {
1472  OVModel *ov_model = (OVModel *)model;
1473  DnnContext *ctx = ov_model->ctx;
1474  OVRequestItem *request;
1475  TaskItem *task;
1476  int ret;
1477 
1478  ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
1479  if (ret != 0) {
1480  return ret;
1481  }
1482 
1483 #if HAVE_OPENVINO2
1484  if (!ov_model->compiled_model) {
1485 #else
1486  if (!ov_model->exe_network) {
1487 #endif
1488  ret = init_model_ov(ov_model, exec_params->input_name,
1489  exec_params->output_names, exec_params->nb_output);
1490  if (ret != 0) {
1491  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
1492  return ret;
1493  }
1494  }
1495 
1496  task = av_malloc(sizeof(*task));
1497  if (!task) {
1498  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
1499  return AVERROR(ENOMEM);
1500  }
1501 
1502  ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->async, 1);
1503  if (ret != 0) {
1504  av_freep(&task);
1505  return ret;
1506  }
1507 
1508  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
1509  av_freep(&task);
1510  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
1511  return AVERROR(ENOMEM);
1512  }
1513 
1514  ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
1515  if (ret != 0) {
1516  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1517  return ret;
1518  }
1519 
1520  if (ctx->async) {
1521  while (ff_queue_size(ov_model->lltask_queue) >= ctx->ov_option.batch_size) {
1522  request = ff_safe_queue_pop_front(ov_model->request_queue);
1523  if (!request) {
1524  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1525  return AVERROR(EINVAL);
1526  }
1527 
1528  ret = execute_model_ov(request, ov_model->lltask_queue);
1529  if (ret != 0) {
1530  return ret;
1531  }
1532  }
1533 
1534  return 0;
1535  }
1536  else {
1537  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
1538  // The classification filter has not been completely
1539  // tested with sync mode, so it is not supported for now.
1540  avpriv_report_missing_feature(ctx, "classify for sync execution");
1541  return AVERROR(ENOSYS);
1542  }
1543 
1544  if (ctx->ov_option.batch_size > 1) {
1545  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
1546  return AVERROR(ENOSYS);
1547  }
1548 
1549  request = ff_safe_queue_pop_front(ov_model->request_queue);
1550  if (!request) {
1551  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1552  return AVERROR(EINVAL);
1553  }
1554  return execute_model_ov(request, ov_model->lltask_queue);
1555  }
1556 }
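/*
 * dnn_execute_model_ov(): in async mode inference is only kicked off once
 * lltask_queue has accumulated at least batch_size entries; any remainder is
 * pushed out later by dnn_flush_ov(). Sync mode rejects classification and
 * batch_size > 1, and runs one request immediately.
 */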
1557 
1558 static DNNAsyncStatusType dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
1559 {
1560  OVModel *ov_model = (OVModel *)model;
1561  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
1562 }
1563 
1564 static int dnn_flush_ov(const DNNModel *model)
1565 {
1566  OVModel *ov_model = (OVModel *)model;
1567  DnnContext *ctx = ov_model->ctx;
1568  OVRequestItem *request;
1569 #if HAVE_OPENVINO2
1570  ov_status_e status;
1571 #else
1572  IEStatusCode status;
1573 #endif
1574  int ret;
1575 
1576  if (ff_queue_size(ov_model->lltask_queue) == 0) {
1577  // no pending task needs to be flushed
1578  return 0;
1579  }
1580 
1581  request = ff_safe_queue_pop_front(ov_model->request_queue);
1582  if (!request) {
1583  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1584  return AVERROR(EINVAL);
1585  }
1586 
1587  ret = fill_model_input_ov(ov_model, request);
1588  if (ret != 0) {
1589  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
1590  return ret;
1591  }
1592 #if HAVE_OPENVINO2
1593  status = ov_infer_request_infer(request->infer_request);
1594  if (status != OK) {
1595  av_log(ctx, AV_LOG_ERROR, "Failed to start sync inference for OV2\n");
1596  return ov2_map_error(status, NULL);
1597  }
1598 #else
1599  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1600  if (status != OK) {
1601  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1602  return DNN_GENERIC_ERROR;
1603  }
1604  status = ie_infer_request_infer_async(request->infer_request);
1605  if (status != OK) {
1606  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1607  return DNN_GENERIC_ERROR;
1608  }
1609 #endif
1610 
1611  return 0;
1612 }
1613 
1614 const DNNModule ff_dnn_backend_openvino = {
1615  .clazz = DNN_DEFINE_CLASS(dnn_openvino),
1616  .type = DNN_OV,
1617  .load_model = dnn_load_model_ov,
1618  .execute_model = dnn_execute_model_ov,
1619  .get_result = dnn_get_result_ov,
1620  .flush = dnn_flush_ov,
1621  .free_model = dnn_free_model_ov,
1622 };
Definition: dnn_backend_openvino.c:513
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:77
OVModel::preprocess
ov_preprocess_prepostprocessor_t * preprocess
Definition: dnn_backend_openvino.c:54
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:37
if
if(ret)
Definition: filter_design.txt:179
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:182
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:90
OVModel::all_input_names
const char * all_input_names
Definition: dnn_backend_openvino.c:59
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:423
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:71
av_err
int av_err
Definition: dnn_backend_openvino.c:102
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:57
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:45
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:70
OVRequestItem
Definition: dnn_backend_openvino.c:69
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:211
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:217
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:163
f
f
Definition: af_crystalizer.c:121
OVModel::compiled_model
ov_compiled_model_t * compiled_model
Definition: dnn_backend_openvino.c:49
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:53
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:41
OVModel::all_output_names
const char * all_output_names
Definition: dnn_backend_openvino.c:60
header
static const uint8_t header[24]
Definition: sdr2.c:68
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:41
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:50
input
activate(): test the status of the outputs and forward it to the corresponding inputs; if nothing is possible, return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input
Definition: filter_design.txt:172
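A hedged sketch of an activate() callback following that pattern, assuming the ff_inlink_* helpers and FF_FILTER_FORWARD_* macros from libavfilter/filters.h and ff_filter_frame() from libavfilter's internal headers (locations vary between FFmpeg versions); illustration only:

    #include "libavfilter/avfilter.h"
    #include "libavfilter/filters.h"    /* ff_inlink_*, FF_FILTER_FORWARD_* helpers */
    #include "libavfilter/internal.h"   /* ff_filter_frame(); location varies by version */

    /* Hypothetical activate(): forward EOF/error status between output and
     * input, consume one frame when available and pass it on, otherwise
     * report that no progress can be made yet. */
    static int activate(AVFilterContext *ctx)
    {
        AVFilterLink *inlink  = ctx->inputs[0];
        AVFilterLink *outlink = ctx->outputs[0];
        AVFrame *frame;
        int ret;

        FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return ff_filter_frame(outlink, frame);

        FF_FILTER_FORWARD_STATUS(inlink, outlink);
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
        return FFERROR_NOT_READY;
    }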
DNN_DEFINE_CLASS
#define DNN_DEFINE_CLASS(fname)
Definition: dnn_backend_common.h:39
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:1155
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:248
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
layout
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout
Definition: filter_design.txt:18
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:87
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:335
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:59
OVModel::ov_model
ov_model_t * ov_model
Definition: dnn_backend_openvino.c:48
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
get_output_ov
static int get_output_ov(DNNModel *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:1258
OVModel::ctx
DnnContext * ctx
Definition: dnn_backend_openvino.c:45
OVModel::core
ov_core_t * core
Definition: dnn_backend_openvino.c:47
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:62
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:48
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:311
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:117
ret
ret
Definition: filter_design.txt:187
frame
These buffered frames must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame() to get more: it must just process the frame or queue it; the task of requesting more frames is left to the filter's request_frame() method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame(): for filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame() on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing a frame
Definition: filter_design.txt:264
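A hedged sketch of a request_frame() callback along those lines, with a hypothetical private context (MyQueueContext) holding one queued frame; ff_request_frame() and ff_filter_frame() are assumed to come from libavfilter's internal headers, whose layout varies between FFmpeg versions:

    #include "libavfilter/avfilter.h"
    #include "libavfilter/internal.h"   /* ff_filter_frame(), ff_request_frame() */

    typedef struct MyQueueContext {     /* hypothetical private context */
        AVFrame *queued;
    } MyQueueContext;

    /* Hypothetical request_frame(): push an already-queued frame if there is
     * one, otherwise ask the single input for more data. */
    static int request_frame(AVFilterLink *outlink)
    {
        AVFilterContext *ctx = outlink->src;
        MyQueueContext *s = ctx->priv;

        if (s->queued) {
            AVFrame *f = s->queued;
            s->queued = NULL;
            return ff_filter_frame(outlink, f);
        }
        return ff_request_frame(ctx->inputs[0]);
    }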
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:41
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:46
OFFSET
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it be. Each option is declared with a name, description, offset, type, default, minimum, maximum and flags: the name is the option name, keep it simple and lowercase; descriptions are short, in lowercase, without a period, and describe what they do, for example "set the foo of the bar"; offset is the offset of the field in your local context, see the OFFSET() macro
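A small hedged illustration of that pattern for a hypothetical FoobarContext (all names here are made up, not from this file): each AVOption maps an option name to a field of the private context through its byte offset.

    #include <stddef.h>
    #include "libavutil/opt.h"

    typedef struct FoobarContext {      /* hypothetical private context */
        const AVClass *class;
        int foo;
    } FoobarContext;

    #define OFFSET(x) offsetof(FoobarContext, x)
    #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)

    static const AVOption foobar_options[] = {
        /* name, description, offset into the context, type, default, min, max, flags */
        { "foo", "set the foo of the bar", OFFSET(foo), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 42, FLAGS },
        { NULL }
    };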
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:63
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:60
AVFrame::height
int height
Definition: frame.h:446
extract_lltask_from_task
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:1192
status
ov_status_e status
Definition: dnn_backend_openvino.c:101
get_input_ov
static int get_input_ov(DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:1062
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:245
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height, defining the bounding box.
Definition: detection_bbox.h:31
ov2_errors
static const struct @290 ov2_errors[]
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:136
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:863
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:82
DL_NONE
@ DL_NONE
Definition: dnn_interface.h:64
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
DNNModel
Definition: dnn_interface.h:97
precision_to_datatype
static DNNDataType precision_to_datatype(ov_element_type_e precision) (with HAVE_OPENVINO2)
static DNNDataType precision_to_datatype(precision_e precision) (without HAVE_OPENVINO2)
Definition: dnn_backend_openvino.c:142
mem.h
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:202
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:47
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
ff_frame_to_dnn_classify
int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:340
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:261
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
DNNExecBaseParams
Definition: dnn_interface.h:80
DNNModel::get_input
int(* get_input)(struct DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
OVModel::input_port
ov_output_const_port_t * input_port
Definition: dnn_backend_openvino.c:50
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:50
OVRequestItem::callback
ov_callback_t callback
Definition: dnn_backend_openvino.c:74
avstring.h
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:45
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:49
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:254
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:58
detection_bbox.h
fill_model_input_ov
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:177
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:194
DNNModule
Definition: dnn_interface.h:175
DNNExecBaseParams::nb_output
uint32_t nb_output
Definition: dnn_interface.h:83
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:42