FFmpeg
dnn_backend_openvino.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_backend_openvino.h"
27 #include "dnn_io_proc.h"
28 #include "libavformat/avio.h"
29 #include "libavutil/avassert.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/avstring.h"
33 #include "../internal.h"
34 #include "queue.h"
35 #include "safe_queue.h"
36 #include <c_api/ie_c_api.h>
37 #include "dnn_backend_common.h"
38 
// User-configurable options for the OpenVINO backend, set through AVOptions
// (see dnn_openvino_options[] below).
// NOTE(review): the option table also references options.batch_size and
// options.input_resizable — their field declarations are not visible in this
// view; confirm they exist in this struct.
typedef struct OVOptions{
    char *device_type;  // OpenVINO device name, e.g. "CPU" (the default)
    int nireq;          // number of parallel infer requests; <=0 selects a default
} OVOptions;
45 
// Logging/option context for the backend; first member must be AVClass*
// so it can be passed to av_log() and av_opt_*().
// NOTE(review): code below dereferences ctx->options — that field's
// declaration is not visible in this view; confirm it is declared here.
typedef struct OVContext {
    const AVClass *class;
} OVContext;
50 
// Backend-private model state wrapping the OpenVINO Inference Engine objects
// plus the queues used for (batched) async execution.
// NOTE(review): code below uses ov_model->ctx and ov_model->model — those
// field declarations are not visible in this view; confirm they are declared.
typedef struct OVModel{
    ie_core_t *core;                    // IE core instance (owns devices)
    ie_network_t *network;              // parsed network, pre-load
    ie_executable_network_t *exe_network; // network loaded onto a device
    SafeQueue *request_queue;  // holds RequestItem
    Queue *task_queue;         // holds TaskItem
    Queue *inference_queue;    // holds InferenceItem
} OVModel;
61 
// one request for one call to openvino
// NOTE(review): infer_completion_callback() and fill_model_input_ov() index
// request->inferences[] — that array field's declaration is not visible in
// this view; confirm it is declared here.
typedef struct RequestItem {
    ie_infer_request_t *infer_request;  // the underlying IE request
    uint32_t inference_count;           // how many inferences are batched in it
    ie_complete_call_back_t callback;   // completion callback + args
} RequestItem;
69 
/**
 * Append iterate_string to the space-separated list held in generated_string.
 * generated_string must be NULL or a heap buffer returned by av_asprintf();
 * the previous buffer is released so repeated appends no longer leak it.
 * The expansion ends with ';' because existing call sites do not add one.
 */
#define APPEND_STRING(generated_string, iterate_string)                               \
    do {                                                                              \
        char *new_string = generated_string ?                                         \
            av_asprintf("%s %s", generated_string, iterate_string) :                  \
            av_asprintf("%s", iterate_string);                                        \
        av_freep(&generated_string);                                                  \
        generated_string = new_string;                                                \
    } while (0);
73 
#define OFFSET(x) offsetof(OVContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
// Options parsed from the filter's option string in ff_dnn_load_model_ov().
static const AVOption dnn_openvino_options[] = {
    { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
    { "nireq", "number of request", OFFSET(options.nireq), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
    { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL }
};
83 
84 AVFILTER_DEFINE_CLASS(dnn_openvino);
85 
86 static DNNDataType precision_to_datatype(precision_e precision)
87 {
88  switch (precision)
89  {
90  case FP32:
91  return DNN_FLOAT;
92  case U8:
93  return DNN_UINT8;
94  default:
95  av_assert0(!"not supported yet.");
96  return DNN_FLOAT;
97  }
98 }
99 
101 {
102  switch (dt)
103  {
104  case DNN_FLOAT:
105  return sizeof(float);
106  case DNN_UINT8:
107  return sizeof(uint8_t);
108  default:
109  av_assert0(!"not supported yet.");
110  return 1;
111  }
112 }
113 
115 {
116  dimensions_t dims;
117  precision_e precision;
118  ie_blob_buffer_t blob_buffer;
119  OVContext *ctx = &ov_model->ctx;
120  IEStatusCode status;
121  DNNData input;
122  ie_blob_t *input_blob = NULL;
123  InferenceItem *inference;
124  TaskItem *task;
125 
126  inference = ff_queue_peek_front(ov_model->inference_queue);
127  av_assert0(inference);
128  task = inference->task;
129 
130  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
131  if (status != OK) {
132  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
133  return DNN_ERROR;
134  }
135 
136  status |= ie_blob_get_dims(input_blob, &dims);
137  status |= ie_blob_get_precision(input_blob, &precision);
138  if (status != OK) {
139  ie_blob_free(&input_blob);
140  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
141  return DNN_ERROR;
142  }
143 
144  status = ie_blob_get_buffer(input_blob, &blob_buffer);
145  if (status != OK) {
146  ie_blob_free(&input_blob);
147  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
148  return DNN_ERROR;
149  }
150 
151  input.height = dims.dims[2];
152  input.width = dims.dims[3];
153  input.channels = dims.dims[1];
154  input.data = blob_buffer.buffer;
155  input.dt = precision_to_datatype(precision);
156  // all models in openvino open model zoo use BGR as input,
157  // change to be an option when necessary.
158  input.order = DCO_BGR;
159 
160  for (int i = 0; i < ctx->options.batch_size; ++i) {
161  inference = ff_queue_pop_front(ov_model->inference_queue);
162  if (!inference) {
163  break;
164  }
165  request->inferences[i] = inference;
166  request->inference_count = i + 1;
167  task = inference->task;
168  switch (ov_model->model->func_type) {
169  case DFT_PROCESS_FRAME:
170  if (task->do_ioproc) {
171  if (ov_model->model->frame_pre_proc != NULL) {
172  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
173  } else {
175  }
176  }
177  break;
180  break;
182  ff_frame_to_dnn_classify(task->in_frame, &input, inference->bbox_index, ctx);
183  break;
184  default:
185  av_assert0(!"should not reach here");
186  break;
187  }
188  input.data = (uint8_t *)input.data
189  + input.width * input.height * input.channels * get_datatype_size(input.dt);
190  }
191  ie_blob_free(&input_blob);
192 
193  return DNN_SUCCESS;
194 }
195 
/**
 * Completion callback for one (possibly batched) infer request.
 * Reads the output blob, runs the per-func-type post-processing for every
 * inference packed into the request, frees the InferenceItems and recycles
 * the RequestItem back onto the request queue.
 * Also invoked directly for the synchronous path in execute_model_ov().
 */
static void infer_completion_callback(void *args)
{
    dimensions_t dims;
    precision_e precision;
    IEStatusCode status;
    RequestItem *request = args;
    InferenceItem *inference = request->inferences[0];
    TaskItem *task = inference->task;
    OVModel *ov_model = task->model;
    SafeQueue *requestq = ov_model->request_queue;
    ie_blob_t *output_blob = NULL;
    ie_blob_buffer_t blob_buffer;
    DNNData output;
    OVContext *ctx = &ov_model->ctx;

    status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
    if (status != OK) {
        //incorrect output name
        char *model_output_name = NULL;
        char *all_output_names = NULL;
        size_t model_output_count = 0;
        av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
        status = ie_network_get_outputs_number(ov_model->network, &model_output_count);
        for (size_t i = 0; i < model_output_count; i++) {
            status = ie_network_get_output_name(ov_model->network, i, &model_output_name);
            APPEND_STRING(all_output_names, model_output_name)
        }
        // NOTE(review): the av_log() opening this diagnostic is not visible in
        // this view; the two lines below are its continuation arguments.
            "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
            task->output_names[0], all_output_names);
        return;
    }

    status = ie_blob_get_buffer(output_blob, &blob_buffer);
    if (status != OK) {
        ie_blob_free(&output_blob);
        av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
        return;
    }

    status |= ie_blob_get_dims(output_blob, &dims);
    status |= ie_blob_get_precision(output_blob, &precision);
    if (status != OK) {
        ie_blob_free(&output_blob);
        av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
        return;
    }

    // Output blob is NCHW; dims[0] is the batch dimension.
    output.channels = dims.dims[1];
    output.height = dims.dims[2];
    output.width = dims.dims[3];
    output.dt = precision_to_datatype(precision);
    output.data = blob_buffer.buffer;

    av_assert0(request->inference_count <= dims.dims[0]);
    av_assert0(request->inference_count >= 1);
    for (int i = 0; i < request->inference_count; ++i) {
        task = request->inferences[i]->task;
        task->inference_done++;

        switch (ov_model->model->func_type) {
        case DFT_PROCESS_FRAME:
            if (task->do_ioproc) {
                if (ov_model->model->frame_post_proc != NULL) {
                    ov_model->model->frame_post_proc(task->out_frame, &output, ov_model->model->filter_ctx);
                } else {
                    // NOTE(review): the default post-proc call is not visible
                    // in this view — presumably ff_proc_from_dnn_to_frame(); confirm.
                }
            } else {
                // do_ioproc==0: caller only wants the output dimensions
                // (used by get_output_ov()).
                task->out_frame->width = output.width;
                task->out_frame->height = output.height;
            }
            break;
            // NOTE(review): the DETECT/CLASSIFY case labels are not visible in
            // this view; the two blocks below belong to those cases.
            if (!ov_model->model->detect_post_proc) {
                av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
                return;
            }
            ov_model->model->detect_post_proc(task->out_frame, &output, 1, ov_model->model->filter_ctx);
            break;
            if (!ov_model->model->classify_post_proc) {
                av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
                return;
            }
            ov_model->model->classify_post_proc(task->out_frame, &output, request->inferences[i]->bbox_index, ov_model->model->filter_ctx);
            break;
        default:
            av_assert0(!"should not reach here");
            break;
        }

        av_freep(&request->inferences[i]);
        // Advance to the next image slot inside the batched output blob.
        output.data = (uint8_t *)output.data
            + output.width * output.height * output.channels * get_datatype_size(output.dt);
    }
    ie_blob_free(&output_blob);

    // Recycle the request so a later batch can reuse it.
    request->inference_count = 0;
    if (ff_safe_queue_push_back(requestq, request) < 0) {
        ie_infer_request_free(&request->infer_request);
        av_freep(&request);
        av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
        return;
    }
}
302 
/**
 * Late (first-use) initialization: reshape for batching if requested, set
 * input/output layout (and precision for analytics models), load the network
 * onto the configured device, and pre-allocate the request/task/inference
 * queues with nireq infer requests.
 *
 * On any failure the entire model is torn down via ff_dnn_free_model_ov(),
 * so callers must not touch ov_model after a DNN_ERROR return.
 */
static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
{
    OVContext *ctx = &ov_model->ctx;
    IEStatusCode status;
    ie_available_devices_t a_dev;
    ie_config_t config = {NULL, NULL, NULL};
    char *all_dev_names = NULL;

    // batch size
    if (ctx->options.batch_size <= 0) {
        ctx->options.batch_size = 1;
    }

    if (ctx->options.batch_size > 1) {
        input_shapes_t input_shapes;
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        if (status != OK)
            goto err;
        // dims[0] is the batch dimension (layout is NCHW)
        for (int i = 0; i < input_shapes.shape_num; i++)
            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
        status = ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK)
            goto err;
    }

    // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
    // while we pass NHWC data from FFmpeg to openvino
    status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
        goto err;
    }
    status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
        goto err;
    }

    // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
    // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
    // ask openvino to do the conversion internally.
    // the current supported SR model (frame processing) is generated from tensorflow model,
    // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
    // TODO: we need to get a final clear&general solution with all backends/formats considered.
    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
        status = ie_network_set_input_precision(ov_model->network, input_name, U8);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
            goto err;
        }
    }

    status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
        // enumerate the available devices to help diagnose a bad "device" option
        status = ie_core_get_available_devices(ov_model->core, &a_dev);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
            goto err;
        }
        for (int i = 0; i < a_dev.num_devices; i++) {
            APPEND_STRING(all_dev_names, a_dev.devices[i])
        }
        av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
               ctx->options.device_type, all_dev_names);
        goto err;
    }

    // create infer_requests for async execution
    if (ctx->options.nireq <= 0) {
        // the default value is a rough estimation
        ctx->options.nireq = av_cpu_count() / 2 + 1;
    }

    ov_model->request_queue = ff_safe_queue_create();
    if (!ov_model->request_queue) {
        goto err;
    }

    for (int i = 0; i < ctx->options.nireq; i++) {
        RequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            goto err;
        }

        item->callback.completeCallBackFunc = infer_completion_callback;
        item->callback.args = item;
        // push first so the queue owns the item; later failures are cleaned
        // up by ff_dnn_free_model_ov() draining the queue
        if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
            av_freep(&item);
            goto err;
        }

        status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
        if (status != OK) {
            goto err;
        }

        item->inferences = av_malloc_array(ctx->options.batch_size, sizeof(*item->inferences));
        if (!item->inferences) {
            goto err;
        }
        item->inference_count = 0;
    }

    ov_model->task_queue = ff_queue_create();
    if (!ov_model->task_queue) {
        goto err;
    }

    ov_model->inference_queue = ff_queue_create();
    if (!ov_model->inference_queue) {
        goto err;
    }

    return DNN_SUCCESS;

err:
    // frees everything reachable from the model, including ov_model itself
    ff_dnn_free_model_ov(&ov_model->model);
    return DNN_ERROR;
}
424 
/**
 * Run one request over the pending inferences in inferenceq.
 * Async tasks start the inference and return immediately (completion is
 * handled by infer_completion_callback); sync tasks block on the inference
 * and invoke the callback directly.
 */
static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
{
    IEStatusCode status;
    // NOTE(review): 'ret' is used below but its declaration is not visible in
    // this view (extraction dropped a line) — presumably DNNReturnType ret;
    InferenceItem *inference;
    TaskItem *task;
    OVContext *ctx;
    OVModel *ov_model;

    if (ff_queue_size(inferenceq) == 0) {
        // nothing to do
        return DNN_SUCCESS;
    }

    inference = ff_queue_peek_front(inferenceq);
    task = inference->task;
    ov_model = task->model;
    ctx = &ov_model->ctx;

    if (task->async) {
        ret = fill_model_input_ov(ov_model, request);
        if (ret != DNN_SUCCESS) {
            return ret;
        }
        status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
            return DNN_ERROR;
        }
        status = ie_infer_request_infer_async(request->infer_request);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
            return DNN_ERROR;
        }
        return DNN_SUCCESS;
    } else {
        ret = fill_model_input_ov(ov_model, request);
        if (ret != DNN_SUCCESS) {
            return ret;
        }
        // blocking inference, then run the completion handler inline
        status = ie_infer_request_infer(request->infer_request);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
            return DNN_ERROR;
        }
        infer_completion_callback(request);
        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
    }
}
473 
474 static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
475 {
476  OVModel *ov_model = model;
477  OVContext *ctx = &ov_model->ctx;
478  char *model_input_name = NULL;
479  char *all_input_names = NULL;
480  IEStatusCode status;
481  size_t model_input_count = 0;
482  dimensions_t dims;
483  precision_e precision;
484  int input_resizable = ctx->options.input_resizable;
485 
486  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
487  if (status != OK) {
488  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
489  return DNN_ERROR;
490  }
491 
492  for (size_t i = 0; i < model_input_count; i++) {
493  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
494  if (status != OK) {
495  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
496  return DNN_ERROR;
497  }
498  if (strcmp(model_input_name, input_name) == 0) {
499  ie_network_name_free(&model_input_name);
500  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
501  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
502  if (status != OK) {
503  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
504  return DNN_ERROR;
505  }
506 
507  input->channels = dims.dims[1];
508  input->height = input_resizable ? -1 : dims.dims[2];
509  input->width = input_resizable ? -1 : dims.dims[3];
510  input->dt = precision_to_datatype(precision);
511  return DNN_SUCCESS;
512  } else {
513  //incorrect input name
514  APPEND_STRING(all_input_names, model_input_name)
515  }
516 
517  ie_network_name_free(&model_input_name);
518  }
519 
520  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
521  return DNN_ERROR;
522 }
523 
525 {
526  AVFrameSideData *sd;
528  const AVDetectionBBox *bbox;
529 
531  if (!sd) { // this frame has nothing detected
532  return 0;
533  }
534 
535  if (!sd->size) {
536  return 0;
537  }
538 
539  header = (const AVDetectionBBoxHeader *)sd->data;
540  if (!header->nb_bboxes) {
541  return 0;
542  }
543 
544  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
545  bbox = av_get_detection_bbox(header, i);
546  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
547  return 0;
548  }
549  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->width) {
550  return 0;
551  }
552 
554  return 0;
555  }
556  }
557 
558  return 1;
559 }
560 
/**
 * Expand a TaskItem into one or more InferenceItems on inference_queue.
 * Frame processing yields exactly one inference; the classification path
 * yields one inference per detection bbox (optionally filtered by
 * exec_params->target), and sets task->inference_todo accordingly.
 */
static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, TaskItem *task, Queue *inference_queue, DNNExecBaseParams *exec_params)
{
    switch (func_type) {
    case DFT_PROCESS_FRAME:
    // NOTE(review): a second case label (presumably DFT_ANALYTICS_DETECT) is
    // not visible in this view.
    {
        InferenceItem *inference = av_malloc(sizeof(*inference));
        if (!inference) {
            return DNN_ERROR;
        }
        task->inference_todo = 1;
        task->inference_done = 0;
        inference->task = task;
        if (ff_queue_push_back(inference_queue, inference) < 0) {
            av_freep(&inference);
            return DNN_ERROR;
        }
        return DNN_SUCCESS;
    }
    // NOTE(review): the case label for the block below (presumably
    // DFT_ANALYTICS_CLASSIFY) is not visible in this view.
    {
        AVFrame *frame = task->in_frame;
        AVFrameSideData *sd;

        task->inference_todo = 0;
        task->inference_done = 0;

        // NOTE(review): the side-data lookup and its NULL check are not
        // visible in this view; only the early-success return remains.
            return DNN_SUCCESS;
        }

        header = (const AVDetectionBBoxHeader *)sd->data;

        for (uint32_t i = 0; i < header->nb_bboxes; i++) {
            InferenceItem *inference;
            // NOTE(review): the av_get_detection_bbox(header, i) fetch into
            // 'bbox' is not visible in this view.

            if (params->target) {
                // skip detections whose label does not match the target filter
                if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
                    continue;
                }
            }

            inference = av_malloc(sizeof(*inference));
            if (!inference) {
                return DNN_ERROR;
            }
            task->inference_todo++;
            inference->task = task;
            inference->bbox_index = i;
            if (ff_queue_push_back(inference_queue, inference) < 0) {
                av_freep(&inference);
                return DNN_ERROR;
            }
        }
        return DNN_SUCCESS;
    }
    default:
        av_assert0(!"should not reach here");
        return DNN_ERROR;
    }
}
626 
/**
 * DNNModel.get_output callback: determine the output dimensions for a given
 * input size by running one dummy inference with do_ioproc disabled (the
 * completion callback then only records width/height on out_frame).
 * Triggers late network init (and an optional reshape when the
 * "input_resizable" option is set).
 */
static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height,
                                   const char *output_name, int *output_width, int *output_height)
{
    // NOTE(review): 'ret' is used below but its declaration is not visible in
    // this view — presumably DNNReturnType ret;
    OVModel *ov_model = model;
    OVContext *ctx = &ov_model->ctx;
    TaskItem task;
    RequestItem *request;
    AVFrame *in_frame = NULL;
    AVFrame *out_frame = NULL;
    IEStatusCode status;
    input_shapes_t input_shapes;

    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
        av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
        return DNN_ERROR;
    }

    if (ctx->options.input_resizable) {
        // reshape H/W (dims[2]/dims[3] of NCHW) to the requested input size
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        input_shapes.shapes->shape.dims[2] = input_height;
        input_shapes.shapes->shape.dims[3] = input_width;
        status |= ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
            return DNN_ERROR;
        }
    }

    if (!ov_model->exe_network) {
        if (init_model_ov(ov_model, input_name, output_name) != DNN_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
            return DNN_ERROR;
        }
    }

    in_frame = av_frame_alloc();
    if (!in_frame) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
        return DNN_ERROR;
    }
    in_frame->width = input_width;
    in_frame->height = input_height;

    out_frame = av_frame_alloc();
    if (!out_frame) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
        av_frame_free(&in_frame);
        return DNN_ERROR;
    }

    // synchronous dummy task; do_ioproc=0 means the callback only records
    // the output dimensions instead of converting real frame data
    task.do_ioproc = 0;
    task.async = 0;
    task.input_name = input_name;
    task.in_frame = in_frame;
    task.output_names = &output_name;
    task.out_frame = out_frame;
    task.nb_output = 1;
    task.model = ov_model;

    if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, NULL) != DNN_SUCCESS) {
        av_frame_free(&out_frame);
        av_frame_free(&in_frame);
        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
        return DNN_ERROR;
    }

    request = ff_safe_queue_pop_front(ov_model->request_queue);
    if (!request) {
        av_frame_free(&out_frame);
        av_frame_free(&in_frame);
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return DNN_ERROR;
    }

    ret = execute_model_ov(request, ov_model->inference_queue);
    *output_width = out_frame->width;
    *output_height = out_frame->height;

    av_frame_free(&out_frame);
    av_frame_free(&in_frame);
    return ret;
}
711 
/**
 * Load an OpenVINO model file and set up the backend-private OVModel.
 * Option parsing happens here; the executable network and queues are created
 * lazily in init_model_ov() on first use.
 * Returns NULL on failure (everything allocated so far is freed).
 */
DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
{
    DNNModel *model = NULL;
    OVModel *ov_model = NULL;
    OVContext *ctx = NULL;
    IEStatusCode status;

    model = av_mallocz(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    ov_model = av_mallocz(sizeof(OVModel));
    if (!ov_model) {
        av_freep(&model);
        return NULL;
    }
    // link the two structs both ways so either can reach the other
    model->model = ov_model;
    ov_model->model = model;
    ov_model->ctx.class = &dnn_openvino_class;
    ctx = &ov_model->ctx;

    //parse options
    // NOTE(review): an av_opt_set_defaults(ctx) call is expected before
    // parsing but is not visible in this view — confirm.
    if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
        goto err;
    }

    status = ie_core_create("", &ov_model->core);
    if (status != OK)
        goto err;

    status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
    if (status != OK) {
        ie_version_t ver;
        ver = ie_c_api_version();
        av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
               "Please check if the model version matches the runtime OpenVINO %s\n",
               model_filename, ver.api_version);
        ie_version_free(&ver);
        goto err;
    }

    model->get_input = &get_input_ov;
    model->get_output = &get_output_ov;
    model->options = options;
    model->filter_ctx = filter_ctx;
    model->func_type = func_type;

    return model;

err:
    ff_dnn_free_model_ov(&model);
    return NULL;
}
768 
770 {
771  OVModel *ov_model = model->model;
772  OVContext *ctx = &ov_model->ctx;
773  TaskItem task;
774  RequestItem *request;
775 
776  if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
777  return DNN_ERROR;
778  }
779 
780  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
781  // Once we add async support for tensorflow backend and native backend,
782  // we'll combine the two sync/async functions in dnn_interface.h to
783  // simplify the code in filter, and async will be an option within backends.
784  // so, do not support now, and classify filter will not call this function.
785  return DNN_ERROR;
786  }
787 
788  if (ctx->options.batch_size > 1) {
789  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
790  return DNN_ERROR;
791  }
792 
793  if (!ov_model->exe_network) {
794  if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
795  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
796  return DNN_ERROR;
797  }
798  }
799 
800  if (ff_dnn_fill_task(&task, exec_params, ov_model, 0, 1) != DNN_SUCCESS) {
801  return DNN_ERROR;
802  }
803 
804  if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
805  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
806  return DNN_ERROR;
807  }
808 
809  request = ff_safe_queue_pop_front(ov_model->request_queue);
810  if (!request) {
811  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
812  return DNN_ERROR;
813  }
814 
815  return execute_model_ov(request, ov_model->inference_queue);
816 }
817 
819 {
820  OVModel *ov_model = model->model;
821  OVContext *ctx = &ov_model->ctx;
822  RequestItem *request;
823  TaskItem *task;
825 
826  if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
827  return DNN_ERROR;
828  }
829 
830  if (!ov_model->exe_network) {
831  if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
832  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
833  return DNN_ERROR;
834  }
835  }
836 
837  task = av_malloc(sizeof(*task));
838  if (!task) {
839  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
840  return DNN_ERROR;
841  }
842 
843  if (ff_dnn_fill_task(task, exec_params, ov_model, 1, 1) != DNN_SUCCESS) {
844  return DNN_ERROR;
845  }
846 
847  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
848  av_freep(&task);
849  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
850  return DNN_ERROR;
851  }
852 
853  if (extract_inference_from_task(model->func_type, task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
854  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
855  return DNN_ERROR;
856  }
857 
858  while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
859  request = ff_safe_queue_pop_front(ov_model->request_queue);
860  if (!request) {
861  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
862  return DNN_ERROR;
863  }
864 
865  ret = execute_model_ov(request, ov_model->inference_queue);
866  if (ret != DNN_SUCCESS) {
867  return ret;
868  }
869  }
870 
871  return DNN_SUCCESS;
872 }
873 
875 {
876  OVModel *ov_model = model->model;
877  TaskItem *task = ff_queue_peek_front(ov_model->task_queue);
878 
879  if (!task) {
880  return DAST_EMPTY_QUEUE;
881  }
882 
883  if (task->inference_done != task->inference_todo) {
884  return DAST_NOT_READY;
885  }
886 
887  *in = task->in_frame;
888  *out = task->out_frame;
889  ff_queue_pop_front(ov_model->task_queue);
890  av_freep(&task);
891 
892  return DAST_SUCCESS;
893 }
894 
896 {
897  OVModel *ov_model = model->model;
898  OVContext *ctx = &ov_model->ctx;
899  RequestItem *request;
900  IEStatusCode status;
902 
903  if (ff_queue_size(ov_model->inference_queue) == 0) {
904  // no pending task need to flush
905  return DNN_SUCCESS;
906  }
907 
908  request = ff_safe_queue_pop_front(ov_model->request_queue);
909  if (!request) {
910  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
911  return DNN_ERROR;
912  }
913 
914  ret = fill_model_input_ov(ov_model, request);
915  if (ret != DNN_SUCCESS) {
916  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
917  return ret;
918  }
919  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
920  if (status != OK) {
921  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
922  return DNN_ERROR;
923  }
924  status = ie_infer_request_infer_async(request->infer_request);
925  if (status != OK) {
926  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
927  return DNN_ERROR;
928  }
929 
930  return DNN_SUCCESS;
931 }
932 
934 {
935  if (*model){
936  OVModel *ov_model = (*model)->model;
937  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
939  if (item && item->infer_request) {
940  ie_infer_request_free(&item->infer_request);
941  }
942  av_freep(&item->inferences);
943  av_freep(&item);
944  }
946 
947  while (ff_queue_size(ov_model->inference_queue) != 0) {
949  av_freep(&item);
950  }
952 
953  while (ff_queue_size(ov_model->task_queue) != 0) {
954  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
955  av_frame_free(&item->in_frame);
956  av_frame_free(&item->out_frame);
957  av_freep(&item);
958  }
959  ff_queue_destroy(ov_model->task_queue);
960 
961  if (ov_model->exe_network)
962  ie_exec_network_free(&ov_model->exe_network);
963  if (ov_model->network)
964  ie_network_free(&ov_model->network);
965  if (ov_model->core)
966  ie_core_free(&ov_model->core);
967  av_freep(&ov_model);
968  av_freep(model);
969  }
970 }
InferenceItem
Definition: dnn_backend_common.h:44
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:47
ff_dnn_fill_task
DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:53
opt.h
filter_ctx
static FilteringContext * filter_ctx
Definition: transcoding.c:48
RequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:67
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1358
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:616
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:56
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:152
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:26
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:89
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
AVFrame::width
int width
Definition: frame.h:361
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:248
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:101
ff_dnn_load_model_ov
DNNModel * ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_openvino.c:712
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:54
FLAGS
#define FLAGS
Definition: cmdutils.c:539
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:68
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:30
InferenceItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:46
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:51
get_input_ov
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:474
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:42
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
InferenceItem::task
TaskItem * task
Definition: dnn_backend_common.h:45
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:90
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:48
ff_proc_from_dnn_to_frame
DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27
ff_proc_from_frame_to_dnn
DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:100
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
TaskItem::model
void * model
Definition: dnn_backend_common.h:31
DNN_SUCCESS
@ DNN_SUCCESS
Definition: dnn_interface.h:33
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:40
DNNModel::get_output
DNNReturnType(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:97
init_model_ov
static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Definition: dnn_backend_openvino.c:303
Queue
Linear double-ended data structure.
Definition: queue.c:34
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:98
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:131
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:181
AVFrameSideData::size
size_t size
Definition: frame.h:212
DNNExecClassificationParams
Definition: dnn_interface.h:75
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:73
ff_dnn_free_model_ov
void ff_dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:933
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNReturnType
DNNReturnType
Definition: dnn_interface.h:33
DNNData
Definition: dnn_interface.h:59
OVModel::inference_queue
Queue * inference_queue
Definition: dnn_backend_openvino.c:59
ctx
AVFormatContext * ctx
Definition: movenc.c:48
DNNModel::get_input
DNNReturnType(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:95
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:39
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
if
if(ret)
Definition: filter_design.txt:179
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:77
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
fill_model_input_ov
static DNNReturnType fill_model_input_ov(OVModel *ov_model, RequestItem *request)
Definition: dnn_backend_openvino.c:114
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:55
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:104
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1559
RequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:64
execute_model_ov
static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:425
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:32
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:52
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:184
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:225
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:100
options
const OptionDef options[]
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:36
DAST_SUCCESS
@ DAST_SUCCESS
Definition: dnn_interface.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:40
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
queue.h
DAST_EMPTY_QUEUE
@ DAST_EMPTY_QUEUE
Definition: dnn_interface.h:47
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:53
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:106
RequestItem::inference_count
uint32_t inference_count
Definition: dnn_backend_openvino.c:66
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
header
static const uint8_t header[24]
Definition: sdr2.c:67
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
RequestItem
Definition: dnn_backend_openvino.c:63
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
get_output_ov
static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:627
ff_frame_to_dnn_detect
DNNReturnType ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:252
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_dnn_flush_ov
DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
Definition: dnn_backend_openvino.c:895
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:524
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:43
RequestItem::inferences
InferenceItem ** inferences
Definition: dnn_backend_openvino.c:65
i
int i
Definition: input.c:407
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:76
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:196
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
ff_dnn_get_async_result_ov
DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_openvino.c:874
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
ff_frame_to_dnn_classify
DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:193
precision_to_datatype
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:86
DNN_ERROR
@ DNN_ERROR
Definition: dnn_interface.h:33
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:244
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:57
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:35
OVContext
Definition: dnn_backend_openvino.c:46
dnn_backend_openvino.h
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:108
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:33
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:58
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:361
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:94
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:69
AVFilterContext
An instance of a filter.
Definition: avfilter.h:333
DNNModel
Definition: dnn_interface.h:84
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:34
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:88
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:41
ff_dnn_execute_model_ov
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:769
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
OVOptions
Definition: dnn_backend_openvino.c:39
DNNExecBaseParams
Definition: dnn_interface.h:67
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVDetectionBBox
Definition: detection_bbox.h:26
extract_inference_from_task
static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, TaskItem *task, Queue *inference_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:561
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:37
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DAST_NOT_READY
@ DAST_NOT_READY
Definition: dnn_interface.h:48
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:189
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:38
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:48
ff_dnn_execute_model_async_ov
DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:818
DNNModel::model
void * model
Definition: dnn_interface.h:86