FFmpeg
dnn_backend_openvino.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_backend_openvino.h"
27 #include "dnn_io_proc.h"
28 #include "libavformat/avio.h"
29 #include "libavutil/avassert.h"
30 #include "libavutil/cpu.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/avstring.h"
34 #include "../internal.h"
35 #include "safe_queue.h"
36 #include <c_api/ie_c_api.h>
37 #include "dnn_backend_common.h"
38 
// Options parsed from the filter's option string (see dnn_openvino_options).
typedef struct OVOptions{
    char *device_type;   // OpenVINO device name, e.g. "CPU", "GPU"
    int nireq;           // number of parallel inference requests (<=0: auto, see init_model_ov)
    uint8_t async;       // non-zero: use asynchronous inference
    // restored: the two fields below were lost in extraction but are
    // referenced by the option table via OFFSET(options.batch_size) and
    // OFFSET(options.input_resizable), and read throughout this file.
    int batch_size;      // number of frames packed into one inference request
    int input_resizable; // non-zero: model input may be reshaped to the frame size
} OVOptions;
46 
47 typedef struct OVContext {
48  const AVClass *class;
50 } OVContext;
51 
52 typedef struct OVModel{
55  ie_core_t *core;
56  ie_network_t *network;
57  ie_executable_network_t *exe_network;
58  SafeQueue *request_queue; // holds OVRequestItem
59  Queue *task_queue; // holds TaskItem
60  Queue *lltask_queue; // holds LastLevelTaskItem
61  const char *all_input_names;
62  const char *all_output_names;
63 } OVModel;
64 
65 // one request for one call to openvino
66 typedef struct OVRequestItem {
67  ie_infer_request_t *infer_request;
69  uint32_t lltask_count;
70  ie_complete_call_back_t callback;
72 
/**
 * Append iterate_string to generated_string, separated by a space, by
 * allocating a fresh buffer with av_asprintf().
 *
 * Fixed: the original form leaked the previous buffer on every append
 * (av_asprintf allocates a new string and the old one was never freed).
 * The expansion ends in a semicolon, so call sites invoke it without one
 * (matching existing usage in this file).
 */
#define APPEND_STRING(generated_string, iterate_string)                     \
    do {                                                                    \
        const char *tmp_str_ = (generated_string);                          \
        (generated_string) = tmp_str_ ?                                     \
            av_asprintf("%s %s", tmp_str_, iterate_string) :                \
            av_asprintf("%s", iterate_string);                              \
        av_free((void *)tmp_str_); /* cast: some targets are const char * */\
    } while (0);
76 
77 #define OFFSET(x) offsetof(OVContext, x)
78 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
79 static const AVOption dnn_openvino_options[] = {
80  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
82  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
83  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
84  { NULL }
85 };
86 
87 AVFILTER_DEFINE_CLASS(dnn_openvino);
88 
89 static DNNDataType precision_to_datatype(precision_e precision)
90 {
91  switch (precision)
92  {
93  case FP32:
94  return DNN_FLOAT;
95  case U8:
96  return DNN_UINT8;
97  default:
98  av_assert0(!"not supported yet.");
99  return DNN_FLOAT;
100  }
101 }
102 
104 {
105  switch (dt)
106  {
107  case DNN_FLOAT:
108  return sizeof(float);
109  case DNN_UINT8:
110  return sizeof(uint8_t);
111  default:
112  av_assert0(!"not supported yet.");
113  return 1;
114  }
115 }
116 
/**
 * Fill the input blob of one inference request with up to
 * ctx->options.batch_size pending tasks popped from lltask_queue,
 * recording each popped lltask in request->lltasks[].
 *
 * Returns 0 on success, DNN_GENERIC_ERROR if the blob cannot be
 * obtained or inspected.
 *
 * NOTE(review): several interior lines of this function appear to have
 * been lost in extraction (the default frame-to-dnn conversion call and
 * the analytics case labels of the switch); the surviving code is kept
 * unchanged and annotated below.
 */
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
{
    dimensions_t dims;
    precision_e precision;
    ie_blob_buffer_t blob_buffer;
    OVContext *ctx = &ov_model->ctx;
    IEStatusCode status;
    DNNData input;
    ie_blob_t *input_blob = NULL;
    LastLevelTaskItem *lltask;
    TaskItem *task;

    // peek (not pop) so the queue is untouched if blob setup fails below
    lltask = ff_queue_peek_front(ov_model->lltask_queue);
    av_assert0(lltask);
    task = lltask->task;

    status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
        return DNN_GENERIC_ERROR;
    }

    status |= ie_blob_get_dims(input_blob, &dims);
    status |= ie_blob_get_precision(input_blob, &precision);
    if (status != OK) {
        ie_blob_free(&input_blob);
        av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
        return DNN_GENERIC_ERROR;
    }

    status = ie_blob_get_buffer(input_blob, &blob_buffer);
    if (status != OK) {
        ie_blob_free(&input_blob);
        av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
        return DNN_GENERIC_ERROR;
    }

    // OpenVINO blobs are NCHW: dims = {N, C, H, W}
    input.height = dims.dims[2];
    input.width = dims.dims[3];
    input.channels = dims.dims[1];
    input.data = blob_buffer.buffer;
    input.dt = precision_to_datatype(precision);
    // all models in openvino open model zoo use BGR as input,
    // change to be an option when necessary.
    input.order = DCO_BGR;

    // pack up to batch_size tasks into this one blob, advancing input.data
    // by one sample's worth of bytes per iteration
    for (int i = 0; i < ctx->options.batch_size; ++i) {
        lltask = ff_queue_pop_front(ov_model->lltask_queue);
        if (!lltask) {
            break;
        }
        request->lltasks[i] = lltask;
        request->lltask_count = i + 1;
        task = lltask->task;
        switch (ov_model->model->func_type) {
        case DFT_PROCESS_FRAME:
            if (task->do_ioproc) {
                if (ov_model->model->frame_pre_proc != NULL) {
                    ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
                } else {
                    // NOTE(review): the default frame-to-dnn conversion call
                    // that belongs in this else branch appears to have been
                    // dropped by extraction — confirm against upstream.
                }
            }
            break;
            // NOTE(review): the case labels/bodies for the analytics
            // function types appear truncated here; only their break
            // statements survive.
            break;
            break;
        default:
            av_assert0(!"should not reach here");
            break;
        }
        input.data = (uint8_t *)input.data
            + input.width * input.height * input.channels * get_datatype_size(input.dt);
    }
    ie_blob_free(&input_blob);

    return 0;
}
198 
/**
 * Completion handler for one inference request (also invoked inline on the
 * synchronous path). Reads the first output blob, runs the function-type
 * specific post-processing for every batched lltask, frees the lltasks and
 * returns the request to the pool for reuse.
 *
 * NOTE(review): a few lines were lost in extraction (the av_log( opener of
 * the first error message and two case labels of the switch); surviving
 * code is kept unchanged and annotated below.
 */
static void infer_completion_callback(void *args)
{
    dimensions_t dims;
    precision_e precision;
    IEStatusCode status;
    OVRequestItem *request = args;
    LastLevelTaskItem *lltask = request->lltasks[0];
    TaskItem *task = lltask->task;
    OVModel *ov_model = task->model;
    SafeQueue *requestq = ov_model->request_queue;
    ie_blob_t *output_blob = NULL;
    ie_blob_buffer_t blob_buffer;
    DNNData output;
    OVContext *ctx = &ov_model->ctx;

    status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
    if (status != OK) {
        // NOTE(review): the av_log(ctx, AV_LOG_ERROR, ... line that begins
        // this message was dropped by extraction; only the continuation
        // strings remain below.
        "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
        task->output_names[0], ov_model->all_output_names);
        return;
    }

    status = ie_blob_get_buffer(output_blob, &blob_buffer);
    if (status != OK) {
        ie_blob_free(&output_blob);
        av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
        return;
    }

    status |= ie_blob_get_dims(output_blob, &dims);
    status |= ie_blob_get_precision(output_blob, &precision);
    if (status != OK) {
        ie_blob_free(&output_blob);
        av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
        return;
    }

    // output blob is NCHW; dims.dims[0] is the batch dimension
    output.channels = dims.dims[1];
    output.height = dims.dims[2];
    output.width = dims.dims[3];
    output.dt = precision_to_datatype(precision);
    output.data = blob_buffer.buffer;

    av_assert0(request->lltask_count <= dims.dims[0]);
    av_assert0(request->lltask_count >= 1);
    for (int i = 0; i < request->lltask_count; ++i) {
        task = request->lltasks[i]->task;

        switch (ov_model->model->func_type) {
        case DFT_PROCESS_FRAME:
            if (task->do_ioproc) {
                if (ov_model->model->frame_post_proc != NULL) {
                    ov_model->model->frame_post_proc(task->out_frame, &output, ov_model->model->filter_ctx);
                } else {
                    // NOTE(review): the default dnn-to-frame conversion call
                    // that belongs here appears lost in extraction.
                }
            } else {
                // caller only wanted the output dimensions, not pixel data
                task->out_frame->width = output.width;
                task->out_frame->height = output.height;
            }
            break;
            // NOTE(review): the DFT_ANALYTICS_DETECT case label was dropped
            // by extraction; the body below belongs to it.
            if (!ov_model->model->detect_post_proc) {
                av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
                return;
            }
            ov_model->model->detect_post_proc(task->in_frame, &output, 1, ov_model->model->filter_ctx);
            break;
            // NOTE(review): the DFT_ANALYTICS_CLASSIFY case label was
            // dropped by extraction; the body below belongs to it.
            if (!ov_model->model->classify_post_proc) {
                av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
                return;
            }
            ov_model->model->classify_post_proc(task->in_frame, &output, request->lltasks[i]->bbox_index, ov_model->model->filter_ctx);
            break;
        default:
            av_assert0(!"should not reach here");
            break;
        }

        task->inference_done++;
        av_freep(&request->lltasks[i]);
        // advance to the next sample in the batched output blob
        output.data = (uint8_t *)output.data
            + output.width * output.height * output.channels * get_datatype_size(output.dt);
    }
    ie_blob_free(&output_blob);

    request->lltask_count = 0;
    // hand the request back to the pool; if that fails it cannot be reused,
    // so release it entirely
    if (ff_safe_queue_push_back(requestq, request) < 0) {
        ie_infer_request_free(&request->infer_request);
        av_freep(&request);
        av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
        return;
    }
}
295 
/**
 * Finish model setup for a given input/output pair: apply the batch size
 * via a network reshape, force NHWC layouts (FFmpeg supplies NHWC data
 * while OpenVINO's canonical 4-D order is NCHW), load the network onto the
 * configured device, and pre-create the pool of inference requests plus
 * the task queues.
 *
 * On any failure the whole model is torn down via ff_dnn_free_model_ov.
 *
 * NOTE(review): several `ret = ...;` assignments that normally precede the
 * goto err statements appear to have been lost in extraction, so some
 * failure paths currently fall through with ret == 0 — confirm against
 * upstream.
 */
static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
{
    int ret = 0;
    OVContext *ctx = &ov_model->ctx;
    IEStatusCode status;
    ie_available_devices_t a_dev;
    ie_config_t config = {NULL, NULL, NULL};
    char *all_dev_names = NULL;

    // batch size
    if (ctx->options.batch_size <= 0) {
        ctx->options.batch_size = 1;
    }

    if (ctx->options.batch_size > 1) {
        input_shapes_t input_shapes;
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        if (status != OK) {
            // NOTE(review): error-code assignment appears lost in extraction
            goto err;
        }
        // set the batch (N) dimension of every input shape
        for (int i = 0; i < input_shapes.shape_num; i++)
            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
        status = ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK) {
            // NOTE(review): error-code assignment appears lost in extraction
            goto err;
        }
    }

    // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
    // while we pass NHWC data from FFmpeg to openvino
    status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
    if (status != OK) {
        if (status == NOT_FOUND) {
            av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set input layout as NHWC, "\
                "all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
        } else{
            av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
        }
        goto err;
    }
    status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
    if (status != OK) {
        if (status == NOT_FOUND) {
            // NOTE(review): this message prints input_name for an *output*
            // lookup failure — looks like an upstream copy/paste slip.
            av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set output layout as NHWC, "\
                "all output(s) are: \"%s\"\n", input_name, ov_model->all_output_names);
        } else{
            av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
        }
        goto err;
    }

    // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
    // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
    // ask openvino to do the conversion internally.
    // the current supported SR model (frame processing) is generated from tensorflow model,
    // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
    // TODO: we need to get a final clear&general solution with all backends/formats considered.
    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
        status = ie_network_set_input_precision(ov_model->network, input_name, U8);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
            goto err;
        }
    }

    status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
        // list the devices OpenVINO actually offers, to help diagnose
        status = ie_core_get_available_devices(ov_model->core, &a_dev);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
            goto err;
        }
        for (int i = 0; i < a_dev.num_devices; i++) {
            APPEND_STRING(all_dev_names, a_dev.devices[i])
        }
        av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
               ctx->options.device_type, all_dev_names);
        ret = AVERROR(ENODEV);
        goto err;
    }

    // create infer_requests for async execution
    if (ctx->options.nireq <= 0) {
        // the default value is a rough estimation
        ctx->options.nireq = av_cpu_count() / 2 + 1;
    }

    ov_model->request_queue = ff_safe_queue_create();
    if (!ov_model->request_queue) {
        ret = AVERROR(ENOMEM);
        goto err;
    }

    for (int i = 0; i < ctx->options.nireq; i++) {
        OVRequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            ret = AVERROR(ENOMEM);
            goto err;
        }

        // wire the async completion callback to this request item
        item->callback.completeCallBackFunc = infer_completion_callback;
        item->callback.args = item;
        // push first, so the err path (which drains the queue) frees it
        if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
            av_freep(&item);
            ret = AVERROR(ENOMEM);
            goto err;
        }

        status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
        if (status != OK) {
            // NOTE(review): error-code assignment appears lost in extraction
            goto err;
        }

        // per-request batch slots for the lltasks it serves
        item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
        if (!item->lltasks) {
            ret = AVERROR(ENOMEM);
            goto err;
        }
        item->lltask_count = 0;
    }

    ov_model->task_queue = ff_queue_create();
    if (!ov_model->task_queue) {
        ret = AVERROR(ENOMEM);
        goto err;
    }

    ov_model->lltask_queue = ff_queue_create();
    if (!ov_model->lltask_queue) {
        ret = AVERROR(ENOMEM);
        goto err;
    }

    return 0;

err:
    ff_dnn_free_model_ov(&ov_model->model);
    return ret;
}
444 
445 static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
446 {
447  IEStatusCode status;
448  LastLevelTaskItem *lltask;
449  int ret = 0;
450  TaskItem *task;
451  OVContext *ctx;
452  OVModel *ov_model;
453 
454  if (ff_queue_size(inferenceq) == 0) {
455  ie_infer_request_free(&request->infer_request);
456  av_freep(&request);
457  return 0;
458  }
459 
460  lltask = ff_queue_peek_front(inferenceq);
461  task = lltask->task;
462  ov_model = task->model;
463  ctx = &ov_model->ctx;
464 
465  if (task->async) {
466  ret = fill_model_input_ov(ov_model, request);
467  if (ret != 0) {
468  goto err;
469  }
470  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
471  if (status != OK) {
472  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
474  goto err;
475  }
476  status = ie_infer_request_infer_async(request->infer_request);
477  if (status != OK) {
478  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
480  goto err;
481  }
482  return 0;
483  } else {
484  ret = fill_model_input_ov(ov_model, request);
485  if (ret != 0) {
486  goto err;
487  }
488  status = ie_infer_request_infer(request->infer_request);
489  if (status != OK) {
490  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
492  goto err;
493  }
494  infer_completion_callback(request);
495  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
496  }
497 err:
498  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
499  ie_infer_request_free(&request->infer_request);
500  av_freep(&request);
501  }
502  return ret;
503 }
504 
505 static int get_input_ov(void *model, DNNData *input, const char *input_name)
506 {
507  OVModel *ov_model = model;
508  OVContext *ctx = &ov_model->ctx;
509  char *model_input_name = NULL;
510  IEStatusCode status;
511  size_t model_input_count = 0;
512  dimensions_t dims;
513  precision_e precision;
514  int input_resizable = ctx->options.input_resizable;
515 
516  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
517  if (status != OK) {
518  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
519  return DNN_GENERIC_ERROR;
520  }
521 
522  for (size_t i = 0; i < model_input_count; i++) {
523  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
524  if (status != OK) {
525  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
526  return DNN_GENERIC_ERROR;
527  }
528  if (strcmp(model_input_name, input_name) == 0) {
529  ie_network_name_free(&model_input_name);
530  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
531  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
532  if (status != OK) {
533  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
534  return DNN_GENERIC_ERROR;
535  }
536 
537  input->channels = dims.dims[1];
538  input->height = input_resizable ? -1 : dims.dims[2];
539  input->width = input_resizable ? -1 : dims.dims[3];
540  input->dt = precision_to_datatype(precision);
541  return 0;
542  }
543 
544  ie_network_name_free(&model_input_name);
545  }
546 
547  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
548  return AVERROR(EINVAL);
549 }
550 
552 {
553  AVFrameSideData *sd;
555  const AVDetectionBBox *bbox;
556 
558  if (!sd) { // this frame has nothing detected
559  return 0;
560  }
561 
562  if (!sd->size) {
563  return 0;
564  }
565 
566  header = (const AVDetectionBBoxHeader *)sd->data;
567  if (!header->nb_bboxes) {
568  return 0;
569  }
570 
571  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
572  bbox = av_get_detection_bbox(header, i);
573  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
574  return 0;
575  }
576  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->width) {
577  return 0;
578  }
579 
581  return 0;
582  }
583  }
584 
585  return 1;
586 }
587 
/**
 * Turn one TaskItem into one or more LastLevelTaskItems pushed onto
 * lltask_queue: a single lltask for frame processing, or one lltask per
 * (matching) detected bounding box for classification.
 *
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 *
 * NOTE(review): the case labels for the analytics function types and
 * several declarations inside the classify branch (header, sd lookup,
 * params, bbox) appear to have been lost in extraction; the surviving
 * statements are kept unchanged and annotated below.
 */
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
{
    switch (func_type) {
    case DFT_PROCESS_FRAME:
    {
        // single inference covering the whole frame
        LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
        if (!lltask) {
            return AVERROR(ENOMEM);
        }
        task->inference_todo = 1;
        task->inference_done = 0;
        lltask->task = task;
        if (ff_queue_push_back(lltask_queue, lltask) < 0) {
            av_freep(&lltask);
            return AVERROR(ENOMEM);
        }
        return 0;
    }
    {
        // NOTE(review): the case label opening this branch was dropped by
        // extraction; it builds one lltask per detected bounding box.
        AVFrame *frame = task->in_frame;
        AVFrameSideData *sd;

        task->inference_todo = 0;
        task->inference_done = 0;

            // NOTE(review): the enclosing `if (...) {` for this early
            // return was dropped by extraction.
            return 0;
        }

        header = (const AVDetectionBBoxHeader *)sd->data;

        for (uint32_t i = 0; i < header->nb_bboxes; i++) {
            LastLevelTaskItem *lltask;

            if (params->target) {
                // only classify bboxes whose detected label matches target
                if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
                    continue;
                }
            }

            lltask = av_malloc(sizeof(*lltask));
            if (!lltask) {
                return AVERROR(ENOMEM);
            }
            task->inference_todo++;
            lltask->task = task;
            lltask->bbox_index = i;
            if (ff_queue_push_back(lltask_queue, lltask) < 0) {
                av_freep(&lltask);
                return AVERROR(ENOMEM);
            }
        }
        return 0;
    }
    default:
        av_assert0(!"should not reach here");
        return AVERROR(EINVAL);
    }
}
653 
/**
 * DNNModel.get_output callback: determine the model's output dimensions
 * for a given input size by running one dummy inference (after reshaping
 * the network when the input is resizable). Only valid for
 * frame-processing models.
 *
 * Returns 0 on success and writes *output_width / *output_height.
 */
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
                         const char *output_name, int *output_width, int *output_height)
{
    int ret;
    OVModel *ov_model = model;
    OVContext *ctx = &ov_model->ctx;
    TaskItem task;
    OVRequestItem *request;
    IEStatusCode status;
    input_shapes_t input_shapes;
    // minimal exec params for the dummy "getting output" inference
    DNNExecBaseParams exec_params = {
        .input_name = input_name,
        .output_names = &output_name,
        .nb_output = 1,
        .in_frame = NULL,
        .out_frame = NULL,
    };

    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
        av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
        return AVERROR(EINVAL);
    }

    if (ctx->options.input_resizable) {
        // reshape H/W (NCHW dims 2/3) of the input to the requested size
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        input_shapes.shapes->shape.dims[2] = input_height;
        input_shapes.shapes->shape.dims[3] = input_width;
        status |= ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
            return DNN_GENERIC_ERROR;
        }
    }

    if (!ov_model->exe_network) {
        // lazy initialization on first use
        ret = init_model_ov(ov_model, input_name, output_name);
        if (ret != 0) {
            av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
            return ret;
        }
    }

    ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
    if (ret != 0) {
        goto err;
    }

    ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
    if (ret != 0) {
        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
        goto err;
    }

    request = ff_safe_queue_pop_front(ov_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        ret = AVERROR(EINVAL);
        goto err;
    }

    // run the dummy inference; task.out_frame then carries the output dims
    ret = execute_model_ov(request, ov_model->lltask_queue);
    *output_width = task.out_frame->width;
    *output_height = task.out_frame->height;
err:
    av_frame_free(&task.out_frame);
    av_frame_free(&task.in_frame);
    return ret;
}
723 
/**
 * Load an OpenVINO model file: allocate the DNNModel/OVModel pair, parse
 * the backend options, create the IE core, read the network and collect
 * the names of all its inputs/outputs (for diagnostics). The executable
 * network and the request pool are created lazily in init_model_ov once
 * the actual input/output pair is known.
 *
 * Returns the new DNNModel, or NULL on failure (everything allocated so
 * far is released via ff_dnn_free_model_ov).
 */
DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
{
    DNNModel *model = NULL;
    OVModel *ov_model = NULL;
    OVContext *ctx = NULL;
    IEStatusCode status;
    size_t node_count = 0;
    char *node_name = NULL;

    model = av_mallocz(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    ov_model = av_mallocz(sizeof(OVModel));
    if (!ov_model) {
        av_freep(&model);
        return NULL;
    }
    // link the generic model and the backend model both ways
    model->model = ov_model;
    ov_model->model = model;
    ov_model->ctx.class = &dnn_openvino_class;
    ctx = &ov_model->ctx;
    ov_model->all_input_names = NULL;
    ov_model->all_output_names = NULL;

    //parse options
    // NOTE(review): the av_opt_set_defaults(ctx) call that normally
    // precedes option parsing appears to have been dropped by extraction
    // here — confirm against upstream.
    if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
        goto err;
    }

    status = ie_core_create("", &ov_model->core);
    if (status != OK)
        goto err;

    status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
    if (status != OK) {
        // report the runtime's API version to help diagnose IR mismatches
        ie_version_t ver;
        ver = ie_c_api_version();
        av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
               "Please check if the model version matches the runtime OpenVINO %s\n",
               model_filename, ver.api_version);
        ie_version_free(&ver);
        goto err;
    }

    //get all the input and output names
    status = ie_network_get_inputs_number(ov_model->network, &node_count);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
        goto err;
    }
    for (size_t i = 0; i < node_count; i++) {
        status = ie_network_get_input_name(ov_model->network, i, &node_name);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
            goto err;
        }
        APPEND_STRING(ov_model->all_input_names, node_name)
    }
    status = ie_network_get_outputs_number(ov_model->network, &node_count);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get output count\n");
        goto err;
    }
    for (size_t i = 0; i < node_count; i++) {
        status = ie_network_get_output_name(ov_model->network, i, &node_name);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d output's name\n", (int)i);
            goto err;
        }
        APPEND_STRING(ov_model->all_output_names, node_name)
    }

    // install the generic-model callbacks
    model->get_input = &get_input_ov;
    model->get_output = &get_output_ov;
    model->options = options;
    model->filter_ctx = filter_ctx;
    model->func_type = func_type;

    return model;

err:
    ff_dnn_free_model_ov(&model);
    return NULL;
}
812 
/**
 * DNN API entry point: queue one execution for this model. The TaskItem is
 * pushed to task_queue (results are fetched later via ff_dnn_get_result_ov)
 * and expanded into lltasks. In async mode, full batches are dispatched to
 * pooled inference requests (leftovers wait for ff_dnn_flush_ov); in sync
 * mode a single inference runs immediately.
 *
 * Returns 0 on success or a negative AVERROR / DNN_GENERIC_ERROR code.
 */
int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
{
    OVModel *ov_model = model->model;
    OVContext *ctx = &ov_model->ctx;
    OVRequestItem *request;
    TaskItem *task;
    int ret;

    ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
    if (ret != 0) {
        return ret;
    }

    if (!ov_model->exe_network) {
        // lazy initialization on first execution
        ret = init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]);
        if (ret != 0) {
            av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
            return ret;
        }
    }

    task = av_malloc(sizeof(*task));
    if (!task) {
        av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
        return AVERROR(ENOMEM);
    }

    ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1);
    if (ret != 0) {
        av_freep(&task);
        return ret;
    }

    // task_queue owns the task from here on (freed in ff_dnn_free_model_ov)
    if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
        return AVERROR(ENOMEM);
    }

    ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
    if (ret != 0) {
        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
        return ret;
    }

    if (ctx->options.async) {
        // dispatch full batches; partial batches stay queued for the flush
        while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
            request = ff_safe_queue_pop_front(ov_model->request_queue);
            if (!request) {
                av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
                return AVERROR(EINVAL);
            }

            ret = execute_model_ov(request, ov_model->lltask_queue);
            if (ret != 0) {
                return ret;
            }
        }

        return 0;
    }
    else {
        if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
            // Classification filter has not been completely
            // tested with the sync mode. So, do not support now.
            avpriv_report_missing_feature(ctx, "classify for sync execution");
            return AVERROR(ENOSYS);
        }

        if (ctx->options.batch_size > 1) {
            avpriv_report_missing_feature(ctx, "batch mode for sync execution");
            return AVERROR(ENOSYS);
        }

        request = ff_safe_queue_pop_front(ov_model->request_queue);
        if (!request) {
            av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
            return AVERROR(EINVAL);
        }
        return execute_model_ov(request, ov_model->lltask_queue);
    }
}
895 
897 {
898  OVModel *ov_model = model->model;
899  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
900 }
901 
902 int ff_dnn_flush_ov(const DNNModel *model)
903 {
904  OVModel *ov_model = model->model;
905  OVContext *ctx = &ov_model->ctx;
906  OVRequestItem *request;
907  IEStatusCode status;
908  int ret;
909 
910  if (ff_queue_size(ov_model->lltask_queue) == 0) {
911  // no pending task need to flush
912  return 0;
913  }
914 
915  request = ff_safe_queue_pop_front(ov_model->request_queue);
916  if (!request) {
917  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
918  return AVERROR(EINVAL);
919  }
920 
921  ret = fill_model_input_ov(ov_model, request);
922  if (ret != 0) {
923  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
924  return ret;
925  }
926  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
927  if (status != OK) {
928  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
929  return DNN_GENERIC_ERROR;
930  }
931  status = ie_infer_request_infer_async(request->infer_request);
932  if (status != OK) {
933  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
934  return DNN_GENERIC_ERROR;
935  }
936 
937  return 0;
938 }
939 
941 {
942  if (*model){
943  OVModel *ov_model = (*model)->model;
944  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
946  if (item && item->infer_request) {
947  ie_infer_request_free(&item->infer_request);
948  }
949  av_freep(&item->lltasks);
950  av_freep(&item);
951  }
953 
954  while (ff_queue_size(ov_model->lltask_queue) != 0) {
956  av_freep(&item);
957  }
958  ff_queue_destroy(ov_model->lltask_queue);
959 
960  while (ff_queue_size(ov_model->task_queue) != 0) {
961  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
962  av_frame_free(&item->in_frame);
963  av_frame_free(&item->out_frame);
964  av_freep(&item);
965  }
966  ff_queue_destroy(ov_model->task_queue);
967 
968  if (ov_model->exe_network)
969  ie_exec_network_free(&ov_model->exe_network);
970  if (ov_model->network)
971  ie_network_free(&ov_model->network);
972  if (ov_model->core)
973  ie_core_free(&ov_model->core);
974  av_freep(&ov_model);
975  av_freep(model);
976  }
977 }
OVModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_openvino.c:60
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:70
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:48
opt.h
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1459
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:682
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:57
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:29
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
get_input_ov
static int get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:505
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
LastLevelTaskItem
Definition: dnn_backend_common.h:50
LastLevelTaskItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:52
test::height
int height
Definition: vc1dsp.c:39
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
AVFrame::width
int width
Definition: frame.h:402
OVOptions::async
uint8_t async
Definition: dnn_backend_openvino.c:42
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:251
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:101
ff_dnn_load_model_ov
DNNModel * ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_openvino.c:724
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:55
FLAGS
#define FLAGS
Definition: cmdutils.c:515
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:68
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:36
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:52
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:43
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:90
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
ff_dnn_flush_ov
int ff_dnn_flush_ov(const DNNModel *model)
Definition: dnn_backend_openvino.c:902
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:51
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:40
Queue
Linear double-ended data structure.
Definition: queue.c:33
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFrameSideData::size
size_t size
Definition: frame.h:239
float
float
Definition: af_crystalizer.c:122
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
DNNExecClassificationParams
Definition: dnn_interface.h:75
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
ff_dnn_free_model_ov
void ff_dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:940
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNData
Definition: dnn_interface.h:59
execute_model_ov
static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:445
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:162
ctx
AVFormatContext * ctx
Definition: movenc.c:48
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:67
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
if
if(ret)
Definition: filter_design.txt:179
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:100
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:77
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OVModel::all_input_names
const char * all_input_names
Definition: dnn_backend_openvino.c:61
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:254
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:69
init_model_ov
static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Definition: dnn_backend_openvino.c:296
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:56
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:104
get_output_ov
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:654
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1667
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:68
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:53
OVRequestItem
Definition: dnn_backend_openvino.c:66
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:206
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:218
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:103
options
const OptionDef options[]
test::width
int width
Definition: vc1dsp.c:38
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:54
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:106
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:238
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
OVModel::all_output_names
const char * all_output_names
Definition: dnn_backend_openvino.c:62
header
static const uint8_t header[24]
Definition: sdr2.c:67
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:56
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:551
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:44
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:79
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:199
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
precision_to_datatype
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:89
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
safe_queue.h
ff_dnn_execute_model_ov
int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:813
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:58
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
OVContext
Definition: dnn_backend_openvino.c:47
dnn_backend_openvino.h
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:108
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:95
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:59
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:402
extract_lltask_from_task
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:588
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:142
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:69
AVFilterContext
An instance of a filter.
Definition: avfilter.h:392
DNNModel
Definition: dnn_interface.h:84
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:236
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:88
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:41
ff_frame_to_dnn_classify
int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:193
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
OVOptions
Definition: dnn_backend_openvino.c:39
DNNExecBaseParams
Definition: dnn_interface.h:67
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:97
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
fill_model_input_ov
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:117
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:190
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:49
ff_dnn_get_result_ov
DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_openvino.c:896
DNNModel::model
void * model
Definition: dnn_interface.h:86
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27