FFmpeg
dnn_backend_openvino.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
#include "dnn_backend_openvino.h"
#include "dnn_io_proc.h"
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/detection_bbox.h"
#include "../internal.h"
#include "safe_queue.h"
#include <c_api/ie_c_api.h>
#include "dnn_backend_common.h"
38 
39 typedef struct OVOptions{
40  char *device_type;
41  int nireq;
42  uint8_t async;
45 } OVOptions;
46 
47 typedef struct OVContext {
48  const AVClass *class;
50 } OVContext;
51 
52 typedef struct OVModel{
55  ie_core_t *core;
56  ie_network_t *network;
57  ie_executable_network_t *exe_network;
58  SafeQueue *request_queue; // holds OVRequestItem
59  Queue *task_queue; // holds TaskItem
60  Queue *lltask_queue; // holds LastLevelTaskItem
61 } OVModel;
62 
63 // one request for one call to openvino
64 typedef struct OVRequestItem {
65  ie_infer_request_t *infer_request;
67  uint32_t lltask_count;
68  ie_complete_call_back_t callback;
70 
/* Append iterate_string to generated_string, space separated.
 * NOTE(review): the previous av_asprintf() result is leaked on each append;
 * callers only use this on error paths immediately before returning. */
#define APPEND_STRING(generated_string, iterate_string) \
 generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
 av_asprintf("%s", iterate_string);
74 
75 #define OFFSET(x) offsetof(OVContext, x)
76 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
77 static const AVOption dnn_openvino_options[] = {
78  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
80  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
81  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
82  { NULL }
83 };
84 
85 AVFILTER_DEFINE_CLASS(dnn_openvino);
86 
87 static DNNDataType precision_to_datatype(precision_e precision)
88 {
89  switch (precision)
90  {
91  case FP32:
92  return DNN_FLOAT;
93  case U8:
94  return DNN_UINT8;
95  default:
96  av_assert0(!"not supported yet.");
97  return DNN_FLOAT;
98  }
99 }
100 
102 {
103  switch (dt)
104  {
105  case DNN_FLOAT:
106  return sizeof(float);
107  case DNN_UINT8:
108  return sizeof(uint8_t);
109  default:
110  av_assert0(!"not supported yet.");
111  return 1;
112  }
113 }
114 
115 static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
116 {
117  dimensions_t dims;
118  precision_e precision;
119  ie_blob_buffer_t blob_buffer;
120  OVContext *ctx = &ov_model->ctx;
121  IEStatusCode status;
122  DNNData input;
123  ie_blob_t *input_blob = NULL;
124  LastLevelTaskItem *lltask;
125  TaskItem *task;
126 
127  lltask = ff_queue_peek_front(ov_model->lltask_queue);
128  av_assert0(lltask);
129  task = lltask->task;
130 
131  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
132  if (status != OK) {
133  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
134  return DNN_GENERIC_ERROR;
135  }
136 
137  status |= ie_blob_get_dims(input_blob, &dims);
138  status |= ie_blob_get_precision(input_blob, &precision);
139  if (status != OK) {
140  ie_blob_free(&input_blob);
141  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
142  return DNN_GENERIC_ERROR;
143  }
144 
145  status = ie_blob_get_buffer(input_blob, &blob_buffer);
146  if (status != OK) {
147  ie_blob_free(&input_blob);
148  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
149  return DNN_GENERIC_ERROR;
150  }
151 
152  input.height = dims.dims[2];
153  input.width = dims.dims[3];
154  input.channels = dims.dims[1];
155  input.data = blob_buffer.buffer;
156  input.dt = precision_to_datatype(precision);
157  // all models in openvino open model zoo use BGR as input,
158  // change to be an option when necessary.
159  input.order = DCO_BGR;
160 
161  for (int i = 0; i < ctx->options.batch_size; ++i) {
162  lltask = ff_queue_pop_front(ov_model->lltask_queue);
163  if (!lltask) {
164  break;
165  }
166  request->lltasks[i] = lltask;
167  request->lltask_count = i + 1;
168  task = lltask->task;
169  switch (ov_model->model->func_type) {
170  case DFT_PROCESS_FRAME:
171  if (task->do_ioproc) {
172  if (ov_model->model->frame_pre_proc != NULL) {
173  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
174  } else {
176  }
177  }
178  break;
181  break;
184  break;
185  default:
186  av_assert0(!"should not reach here");
187  break;
188  }
189  input.data = (uint8_t *)input.data
190  + input.width * input.height * input.channels * get_datatype_size(input.dt);
191  }
192  ie_blob_free(&input_blob);
193 
194  return 0;
195 }
196 
197 static void infer_completion_callback(void *args)
198 {
199  dimensions_t dims;
200  precision_e precision;
201  IEStatusCode status;
202  OVRequestItem *request = args;
203  LastLevelTaskItem *lltask = request->lltasks[0];
204  TaskItem *task = lltask->task;
205  OVModel *ov_model = task->model;
206  SafeQueue *requestq = ov_model->request_queue;
207  ie_blob_t *output_blob = NULL;
208  ie_blob_buffer_t blob_buffer;
209  DNNData output;
210  OVContext *ctx = &ov_model->ctx;
211 
212  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
213  if (status != OK) {
214  //incorrect output name
215  char *model_output_name = NULL;
216  char *all_output_names = NULL;
217  size_t model_output_count = 0;
218  av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
219  status = ie_network_get_outputs_number(ov_model->network, &model_output_count);
220  for (size_t i = 0; i < model_output_count; i++) {
221  status = ie_network_get_output_name(ov_model->network, i, &model_output_name);
222  APPEND_STRING(all_output_names, model_output_name)
223  }
225  "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
226  task->output_names[0], all_output_names);
227  return;
228  }
229 
230  status = ie_blob_get_buffer(output_blob, &blob_buffer);
231  if (status != OK) {
232  ie_blob_free(&output_blob);
233  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
234  return;
235  }
236 
237  status |= ie_blob_get_dims(output_blob, &dims);
238  status |= ie_blob_get_precision(output_blob, &precision);
239  if (status != OK) {
240  ie_blob_free(&output_blob);
241  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
242  return;
243  }
244 
245  output.channels = dims.dims[1];
246  output.height = dims.dims[2];
247  output.width = dims.dims[3];
248  output.dt = precision_to_datatype(precision);
249  output.data = blob_buffer.buffer;
250 
251  av_assert0(request->lltask_count <= dims.dims[0]);
252  av_assert0(request->lltask_count >= 1);
253  for (int i = 0; i < request->lltask_count; ++i) {
254  task = request->lltasks[i]->task;
255  task->inference_done++;
256 
257  switch (ov_model->model->func_type) {
258  case DFT_PROCESS_FRAME:
259  if (task->do_ioproc) {
260  if (ov_model->model->frame_post_proc != NULL) {
261  ov_model->model->frame_post_proc(task->out_frame, &output, ov_model->model->filter_ctx);
262  } else {
264  }
265  } else {
266  task->out_frame->width = output.width;
267  task->out_frame->height = output.height;
268  }
269  break;
271  if (!ov_model->model->detect_post_proc) {
272  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
273  return;
274  }
275  ov_model->model->detect_post_proc(task->in_frame, &output, 1, ov_model->model->filter_ctx);
276  break;
278  if (!ov_model->model->classify_post_proc) {
279  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
280  return;
281  }
282  ov_model->model->classify_post_proc(task->in_frame, &output, request->lltasks[i]->bbox_index, ov_model->model->filter_ctx);
283  break;
284  default:
285  av_assert0(!"should not reach here");
286  break;
287  }
288 
289  av_freep(&request->lltasks[i]);
290  output.data = (uint8_t *)output.data
291  + output.width * output.height * output.channels * get_datatype_size(output.dt);
292  }
293  ie_blob_free(&output_blob);
294 
295  request->lltask_count = 0;
296  if (ff_safe_queue_push_back(requestq, request) < 0) {
297  ie_infer_request_free(&request->infer_request);
298  av_freep(&request);
299  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
300  return;
301  }
302 }
303 
304 static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
305 {
306  int ret = 0;
307  OVContext *ctx = &ov_model->ctx;
308  IEStatusCode status;
309  ie_available_devices_t a_dev;
310  ie_config_t config = {NULL, NULL, NULL};
311  char *all_dev_names = NULL;
312 
313  // batch size
314  if (ctx->options.batch_size <= 0) {
315  ctx->options.batch_size = 1;
316  }
317 
318  if (ctx->options.batch_size > 1) {
319  input_shapes_t input_shapes;
320  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
321  if (status != OK) {
323  goto err;
324  }
325  for (int i = 0; i < input_shapes.shape_num; i++)
326  input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
327  status = ie_network_reshape(ov_model->network, input_shapes);
328  ie_network_input_shapes_free(&input_shapes);
329  if (status != OK) {
331  goto err;
332  }
333  }
334 
335  // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
336  // while we pass NHWC data from FFmpeg to openvino
337  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
338  if (status != OK) {
339  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
341  goto err;
342  }
343  status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
344  if (status != OK) {
345  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
347  goto err;
348  }
349 
350  // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
351  // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
352  // ask openvino to do the conversion internally.
353  // the current supported SR model (frame processing) is generated from tensorflow model,
354  // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
355  // TODO: we need to get a final clear&general solution with all backends/formats considered.
356  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
357  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
358  if (status != OK) {
359  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
361  goto err;
362  }
363  }
364 
365  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
366  if (status != OK) {
367  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
368  status = ie_core_get_available_devices(ov_model->core, &a_dev);
369  if (status != OK) {
370  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
372  goto err;
373  }
374  for (int i = 0; i < a_dev.num_devices; i++) {
375  APPEND_STRING(all_dev_names, a_dev.devices[i])
376  }
377  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
378  ctx->options.device_type, all_dev_names);
379  ret = AVERROR(ENODEV);
380  goto err;
381  }
382 
383  // create infer_requests for async execution
384  if (ctx->options.nireq <= 0) {
385  // the default value is a rough estimation
386  ctx->options.nireq = av_cpu_count() / 2 + 1;
387  }
388 
389  ov_model->request_queue = ff_safe_queue_create();
390  if (!ov_model->request_queue) {
391  ret = AVERROR(ENOMEM);
392  goto err;
393  }
394 
395  for (int i = 0; i < ctx->options.nireq; i++) {
396  OVRequestItem *item = av_mallocz(sizeof(*item));
397  if (!item) {
398  ret = AVERROR(ENOMEM);
399  goto err;
400  }
401 
402  item->callback.completeCallBackFunc = infer_completion_callback;
403  item->callback.args = item;
404  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
405  av_freep(&item);
406  ret = AVERROR(ENOMEM);
407  goto err;
408  }
409 
410  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
411  if (status != OK) {
413  goto err;
414  }
415 
416  item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
417  if (!item->lltasks) {
418  ret = AVERROR(ENOMEM);
419  goto err;
420  }
421  item->lltask_count = 0;
422  }
423 
424  ov_model->task_queue = ff_queue_create();
425  if (!ov_model->task_queue) {
426  ret = AVERROR(ENOMEM);
427  goto err;
428  }
429 
430  ov_model->lltask_queue = ff_queue_create();
431  if (!ov_model->lltask_queue) {
432  ret = AVERROR(ENOMEM);
433  goto err;
434  }
435 
436  return 0;
437 
438 err:
439  ff_dnn_free_model_ov(&ov_model->model);
440  return ret;
441 }
442 
443 static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
444 {
445  IEStatusCode status;
446  LastLevelTaskItem *lltask;
447  int ret = 0;
448  TaskItem *task;
449  OVContext *ctx;
450  OVModel *ov_model;
451 
452  if (ff_queue_size(inferenceq) == 0) {
453  ie_infer_request_free(&request->infer_request);
454  av_freep(&request);
455  return 0;
456  }
457 
458  lltask = ff_queue_peek_front(inferenceq);
459  task = lltask->task;
460  ov_model = task->model;
461  ctx = &ov_model->ctx;
462 
463  if (task->async) {
464  ret = fill_model_input_ov(ov_model, request);
465  if (ret != 0) {
466  goto err;
467  }
468  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
469  if (status != OK) {
470  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
472  goto err;
473  }
474  status = ie_infer_request_infer_async(request->infer_request);
475  if (status != OK) {
476  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
478  goto err;
479  }
480  return 0;
481  } else {
482  ret = fill_model_input_ov(ov_model, request);
483  if (ret != 0) {
484  goto err;
485  }
486  status = ie_infer_request_infer(request->infer_request);
487  if (status != OK) {
488  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
490  goto err;
491  }
492  infer_completion_callback(request);
493  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
494  }
495 err:
496  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
497  ie_infer_request_free(&request->infer_request);
498  av_freep(&request);
499  }
500  return ret;
501 }
502 
503 static int get_input_ov(void *model, DNNData *input, const char *input_name)
504 {
505  OVModel *ov_model = model;
506  OVContext *ctx = &ov_model->ctx;
507  char *model_input_name = NULL;
508  char *all_input_names = NULL;
509  IEStatusCode status;
510  size_t model_input_count = 0;
511  dimensions_t dims;
512  precision_e precision;
513  int input_resizable = ctx->options.input_resizable;
514 
515  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
516  if (status != OK) {
517  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
518  return DNN_GENERIC_ERROR;
519  }
520 
521  for (size_t i = 0; i < model_input_count; i++) {
522  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
523  if (status != OK) {
524  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
525  return DNN_GENERIC_ERROR;
526  }
527  if (strcmp(model_input_name, input_name) == 0) {
528  ie_network_name_free(&model_input_name);
529  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
530  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
531  if (status != OK) {
532  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
533  return DNN_GENERIC_ERROR;
534  }
535 
536  input->channels = dims.dims[1];
537  input->height = input_resizable ? -1 : dims.dims[2];
538  input->width = input_resizable ? -1 : dims.dims[3];
539  input->dt = precision_to_datatype(precision);
540  return 0;
541  } else {
542  //incorrect input name
543  APPEND_STRING(all_input_names, model_input_name)
544  }
545 
546  ie_network_name_free(&model_input_name);
547  }
548 
549  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
550  return AVERROR(EINVAL);
551 }
552 
554 {
555  AVFrameSideData *sd;
557  const AVDetectionBBox *bbox;
558 
560  if (!sd) { // this frame has nothing detected
561  return 0;
562  }
563 
564  if (!sd->size) {
565  return 0;
566  }
567 
568  header = (const AVDetectionBBoxHeader *)sd->data;
569  if (!header->nb_bboxes) {
570  return 0;
571  }
572 
573  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
574  bbox = av_get_detection_bbox(header, i);
575  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
576  return 0;
577  }
578  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->width) {
579  return 0;
580  }
581 
583  return 0;
584  }
585  }
586 
587  return 1;
588 }
589 
590 static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
591 {
592  switch (func_type) {
593  case DFT_PROCESS_FRAME:
595  {
596  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
597  if (!lltask) {
598  return AVERROR(ENOMEM);
599  }
600  task->inference_todo = 1;
601  task->inference_done = 0;
602  lltask->task = task;
603  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
604  av_freep(&lltask);
605  return AVERROR(ENOMEM);
606  }
607  return 0;
608  }
610  {
612  AVFrame *frame = task->in_frame;
613  AVFrameSideData *sd;
615 
616  task->inference_todo = 0;
617  task->inference_done = 0;
618 
620  return 0;
621  }
622 
624  header = (const AVDetectionBBoxHeader *)sd->data;
625 
626  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
627  LastLevelTaskItem *lltask;
629 
630  if (params->target) {
631  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
632  continue;
633  }
634  }
635 
636  lltask = av_malloc(sizeof(*lltask));
637  if (!lltask) {
638  return AVERROR(ENOMEM);
639  }
640  task->inference_todo++;
641  lltask->task = task;
642  lltask->bbox_index = i;
643  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
644  av_freep(&lltask);
645  return AVERROR(ENOMEM);
646  }
647  }
648  return 0;
649  }
650  default:
651  av_assert0(!"should not reach here");
652  return AVERROR(EINVAL);
653  }
654 }
655 
/**
 * DNNModel.get_output callback: determine the model's output dimensions for
 * the given input size by running one real inference on a dummy frame
 * (infer_completion_callback stores width/height when do_ioproc is off).
 * Frame-processing models only.
 *
 * @return 0 on success, negative AVERROR or DNN_GENERIC_ERROR on failure.
 */
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
                         const char *output_name, int *output_width, int *output_height)
{
    int ret;
    OVModel *ov_model = model;
    OVContext *ctx = &ov_model->ctx;
    TaskItem task;
    OVRequestItem *request;
    IEStatusCode status;
    input_shapes_t input_shapes;
    DNNExecBaseParams exec_params = {
        .input_name = input_name,
        .output_names = &output_name,
        .nb_output = 1,
        .in_frame = NULL,
        .out_frame = NULL,
    };

    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
        av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
        return AVERROR(EINVAL);
    }

    if (ctx->options.input_resizable) {
        // reshape the network's spatial dims (NCHW: [2]=H, [3]=W) to the frame size
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        input_shapes.shapes->shape.dims[2] = input_height;
        input_shapes.shapes->shape.dims[3] = input_width;
        status |= ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
            return DNN_GENERIC_ERROR;
        }
    }

    // lazy initialization: the executable network is created on first use
    if (!ov_model->exe_network) {
        ret = init_model_ov(ov_model, input_name, output_name);
        if (ret != 0) {
            av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
            return ret;
        }
    }

    ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
    if (ret != 0) {
        goto err;
    }

    ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
    if (ret != 0) {
        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
        goto err;
    }

    request = ff_safe_queue_pop_front(ov_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        ret = AVERROR(EINVAL);
        goto err;
    }

    ret = execute_model_ov(request, ov_model->lltask_queue);
    // NOTE(review): out_frame is read even when execute_model_ov() failed;
    // the frame still exists (allocated by fill_gettingoutput_task) and ret
    // carries the error to the caller, but the values are then meaningless.
    *output_width = task.out_frame->width;
    *output_height = task.out_frame->height;
err:
    av_frame_free(&task.out_frame);
    av_frame_free(&task.in_frame);
    return ret;
}
725 
726 DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
727 {
728  DNNModel *model = NULL;
729  OVModel *ov_model = NULL;
730  OVContext *ctx = NULL;
731  IEStatusCode status;
732 
733  model = av_mallocz(sizeof(DNNModel));
734  if (!model){
735  return NULL;
736  }
737 
738  ov_model = av_mallocz(sizeof(OVModel));
739  if (!ov_model) {
740  av_freep(&model);
741  return NULL;
742  }
743  model->model = ov_model;
744  ov_model->model = model;
745  ov_model->ctx.class = &dnn_openvino_class;
746  ctx = &ov_model->ctx;
747 
748  //parse options
750  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
751  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
752  goto err;
753  }
754 
755  status = ie_core_create("", &ov_model->core);
756  if (status != OK)
757  goto err;
758 
759  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
760  if (status != OK) {
761  ie_version_t ver;
762  ver = ie_c_api_version();
763  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
764  "Please check if the model version matches the runtime OpenVINO %s\n",
765  model_filename, ver.api_version);
766  ie_version_free(&ver);
767  goto err;
768  }
769 
770  model->get_input = &get_input_ov;
771  model->get_output = &get_output_ov;
772  model->options = options;
773  model->filter_ctx = filter_ctx;
774  model->func_type = func_type;
775 
776  return model;
777 
778 err:
779  ff_dnn_free_model_ov(&model);
780  return NULL;
781 }
782 
/**
 * Queue one execution request on the model. In async mode the lltasks are
 * batched and dispatched once a full batch is available; in sync mode a
 * single inference runs immediately (classify and batch>1 unsupported).
 *
 * @return 0 on success, negative AVERROR or DNN_GENERIC_ERROR on failure.
 */
int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
{
    OVModel *ov_model = model->model;
    OVContext *ctx = &ov_model->ctx;
    OVRequestItem *request;
    TaskItem *task;
    int ret;

    ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
    if (ret != 0) {
        return ret;
    }

    // lazy initialization on first execution, when the I/O names are known
    if (!ov_model->exe_network) {
        ret = init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]);
        if (ret != 0) {
            av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
            return ret;
        }
    }

    task = av_malloc(sizeof(*task));
    if (!task) {
        av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
        return AVERROR(ENOMEM);
    }

    ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1);
    if (ret != 0) {
        av_freep(&task);
        return ret;
    }

    // the task queue owns the TaskItem until ff_dnn_get_result_ov drains it
    if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
        return AVERROR(ENOMEM);
    }

    ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
    if (ret != 0) {
        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
        return ret;
    }

    if (ctx->options.async) {
        // dispatch as many full batches as are ready
        while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
            request = ff_safe_queue_pop_front(ov_model->request_queue);
            if (!request) {
                av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
                return AVERROR(EINVAL);
            }

            ret = execute_model_ov(request, ov_model->lltask_queue);
            if (ret != 0) {
                return ret;
            }
        }

        return 0;
    }
    else {
        if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
            // Classification filter has not been completely
            // tested with the sync mode. So, do not support now.
            avpriv_report_missing_feature(ctx, "classify for sync execution");
            return AVERROR(ENOSYS);
        }

        if (ctx->options.batch_size > 1) {
            avpriv_report_missing_feature(ctx, "batch mode for sync execution");
            return AVERROR(ENOSYS);
        }

        request = ff_safe_queue_pop_front(ov_model->request_queue);
        if (!request) {
            av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
            return AVERROR(EINVAL);
        }
        return execute_model_ov(request, ov_model->lltask_queue);
    }
}
865 
867 {
868  OVModel *ov_model = model->model;
869  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
870 }
871 
872 int ff_dnn_flush_ov(const DNNModel *model)
873 {
874  OVModel *ov_model = model->model;
875  OVContext *ctx = &ov_model->ctx;
876  OVRequestItem *request;
877  IEStatusCode status;
878  int ret;
879 
880  if (ff_queue_size(ov_model->lltask_queue) == 0) {
881  // no pending task need to flush
882  return 0;
883  }
884 
885  request = ff_safe_queue_pop_front(ov_model->request_queue);
886  if (!request) {
887  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
888  return AVERROR(EINVAL);
889  }
890 
891  ret = fill_model_input_ov(ov_model, request);
892  if (ret != 0) {
893  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
894  return ret;
895  }
896  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
897  if (status != OK) {
898  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
899  return DNN_GENERIC_ERROR;
900  }
901  status = ie_infer_request_infer_async(request->infer_request);
902  if (status != OK) {
903  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
904  return DNN_GENERIC_ERROR;
905  }
906 
907  return 0;
908 }
909 
911 {
912  if (*model){
913  OVModel *ov_model = (*model)->model;
914  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
916  if (item && item->infer_request) {
917  ie_infer_request_free(&item->infer_request);
918  }
919  av_freep(&item->lltasks);
920  av_freep(&item);
921  }
923 
924  while (ff_queue_size(ov_model->lltask_queue) != 0) {
926  av_freep(&item);
927  }
928  ff_queue_destroy(ov_model->lltask_queue);
929 
930  while (ff_queue_size(ov_model->task_queue) != 0) {
931  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
932  av_frame_free(&item->in_frame);
933  av_frame_free(&item->out_frame);
934  av_freep(&item);
935  }
936  ff_queue_destroy(ov_model->task_queue);
937 
938  if (ov_model->exe_network)
939  ie_exec_network_free(&ov_model->exe_network);
940  if (ov_model->network)
941  ie_network_free(&ov_model->network);
942  if (ov_model->core)
943  ie_core_free(&ov_model->core);
944  av_freep(&ov_model);
945  av_freep(model);
946  }
947 }
OVModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_openvino.c:60
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:68
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:48
opt.h
filter_ctx
static FilteringContext * filter_ctx
Definition: transcoding.c:49
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1458
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:684
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:57
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:29
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
get_input_ov
static int get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:503
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
LastLevelTaskItem
Definition: dnn_backend_common.h:50
LastLevelTaskItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:52
test::height
int height
Definition: vc1dsp.c:39
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
AVFrame::width
int width
Definition: frame.h:397
OVOptions::async
uint8_t async
Definition: dnn_backend_openvino.c:42
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:251
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:101
ff_dnn_load_model_ov
DNNModel * ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_openvino.c:726
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:55
FLAGS
#define FLAGS
Definition: cmdutils.c:504
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:68
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:36
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:52
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:43
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:90
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
ff_dnn_flush_ov
int ff_dnn_flush_ov(const DNNModel *model)
Definition: dnn_backend_openvino.c:872
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:40
Queue
Linear double-ended data structure.
Definition: queue.c:33
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFrameSideData::size
size_t size
Definition: frame.h:234
float
float
Definition: af_crystalizer.c:122
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
DNNExecClassificationParams
Definition: dnn_interface.h:75
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
ff_dnn_free_model_ov
void ff_dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:910
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNData
Definition: dnn_interface.h:59
execute_model_ov
static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:443
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:162
ctx
AVFormatContext * ctx
Definition: movenc.c:48
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:65
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
if
if(ret)
Definition: filter_design.txt:179
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:100
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:77
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:254
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:67
init_model_ov
static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Definition: dnn_backend_openvino.c:304
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:56
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:104
get_output_ov
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:656
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1666
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:66
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:53
OVRequestItem
Definition: dnn_backend_openvino.c:64
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:194
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:228
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:101
options
const OptionDef options[]
test::width
int width
Definition: vc1dsp.c:38
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:54
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:106
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
header
static const uint8_t header[24]
Definition: sdr2.c:67
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:56
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:553
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:44
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:77
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:197
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
precision_to_datatype
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:87
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
safe_queue.h
ff_dnn_execute_model_ov
int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:783
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:58
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
OVContext
Definition: dnn_backend_openvino.c:47
dnn_backend_openvino.h
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:108
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:95
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:59
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:397
extract_lltask_from_task
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:590
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:142
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:69
AVFilterContext
An instance of a filter.
Definition: avfilter.h:408
DNNModel
Definition: dnn_interface.h:84
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:88
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:41
ff_frame_to_dnn_classify
int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:193
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
OVOptions
Definition: dnn_backend_openvino.c:39
DNNExecBaseParams
Definition: dnn_interface.h:67
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:97
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
fill_model_input_ov
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:115
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:190
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:49
ff_dnn_get_result_ov
DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_openvino.c:866
DNNModel::model
void * model
Definition: dnn_interface.h:86
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27