FFmpeg
dnn_backend_openvino.c
/*
 * Copyright (c) 2020
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DNN OpenVINO backend implementation.
 */

#include "dnn_backend_openvino.h"
#include "dnn_io_proc.h"
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "../internal.h"
#include "queue.h"
#include "safe_queue.h"
#include <c_api/ie_c_api.h>

typedef struct OVOptions{
    char *device_type;
    int nireq;
    int batch_size;
    int input_resizable;
} OVOptions;

typedef struct OVContext {
    const AVClass *class;
    OVOptions options;
} OVContext;

typedef struct OVModel{
    OVContext ctx;
    DNNModel *model;
    ie_core_t *core;
    ie_network_t *network;
    ie_executable_network_t *exe_network;
    ie_infer_request_t *infer_request;

    /* for async execution */
    FFSafeQueue *request_queue;  // holds RequestItem
    FFQueue *task_queue;         // holds TaskItem
} OVModel;

typedef struct TaskItem {
    OVModel *ov_model;
    const char *input_name;
    AVFrame *in_frame;
    const char *output_name;
    AVFrame *out_frame;
    int do_ioproc;
    int async;
    int done;
} TaskItem;

typedef struct RequestItem {
    ie_infer_request_t *infer_request;
    TaskItem **tasks;
    int task_count;
    ie_complete_call_back_t callback;
} RequestItem;

#define APPEND_STRING(generated_string, iterate_string)                                            \
    generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
                                          av_asprintf("%s", iterate_string);

#define OFFSET(x) offsetof(OVContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
static const AVOption dnn_openvino_options[] = {
    { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
    { "nireq", "number of request", OFFSET(options.nireq), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
    { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS },
    { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(dnn_openvino);

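/*
 * The backend options above are handed over by the calling filter as a single
 * string and parsed in ff_dnn_load_model_ov(), with '=' between key and value
 * and '&' between pairs.  An illustrative sketch only -- the filter-side
 * argument names belong to the dnn_processing filter, not to this file, and
 * the model/tensor names are made up:
 *
 *   -vf dnn_processing=dnn_backend=openvino:model=srcnn.xml:input=x:output=y:options=device=CPU&nireq=2
 *
 * Depending on the shell and the filtergraph syntax, the '&' may need escaping.
 */
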
static DNNDataType precision_to_datatype(precision_e precision)
{
    switch (precision)
    {
    case FP32:
        return DNN_FLOAT;
    default:
        av_assert0(!"not supported yet.");
        return DNN_FLOAT;
    }
}

static int get_datatype_size(DNNDataType dt)
{
    switch (dt)
    {
    case DNN_FLOAT:
        return sizeof(float);
    default:
        av_assert0(!"not supported yet.");
        return 1;
    }
}

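/* Copy the input frame of every task batched in the request into the input
 * blob of its inference request, using the model's pre_proc callback when the
 * caller asked for io processing, or the generic frame-to-DNN conversion
 * otherwise. */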
static DNNReturnType fill_model_input_ov(OVModel *ov_model, RequestItem *request)
{
    dimensions_t dims;
    precision_e precision;
    ie_blob_buffer_t blob_buffer;
    OVContext *ctx = &ov_model->ctx;
    IEStatusCode status;
    DNNData input;
    ie_blob_t *input_blob = NULL;
    TaskItem *task = request->tasks[0];

    status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
        return DNN_ERROR;
    }

    status |= ie_blob_get_dims(input_blob, &dims);
    status |= ie_blob_get_precision(input_blob, &precision);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
        return DNN_ERROR;
    }

    status = ie_blob_get_buffer(input_blob, &blob_buffer);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
        return DNN_ERROR;
    }

    input.height = dims.dims[2];
    input.width = dims.dims[3];
    input.channels = dims.dims[1];
    input.data = blob_buffer.buffer;
    input.dt = precision_to_datatype(precision);

    av_assert0(request->task_count <= dims.dims[0]);
    for (int i = 0; i < request->task_count; ++i) {
        task = request->tasks[i];
        if (task->do_ioproc) {
            if (ov_model->model->pre_proc != NULL) {
                ov_model->model->pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
            } else {
                proc_from_frame_to_dnn(task->in_frame, &input, ctx);
            }
        }
        input.data = (uint8_t *)input.data
                     + input.width * input.height * input.channels * get_datatype_size(input.dt);
    }
    ie_blob_free(&input_blob);

    return DNN_SUCCESS;
}

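/* Completion callback shared by sync and async execution: it reads the output
 * blob back into each task's output frame (via post_proc or the generic
 * DNN-to-frame conversion), marks the tasks as done and, in the async case,
 * returns the RequestItem to the free-request queue. */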
static void infer_completion_callback(void *args)
{
    dimensions_t dims;
    precision_e precision;
    IEStatusCode status;
    RequestItem *request = args;
    TaskItem *task = request->tasks[0];
    ie_blob_t *output_blob = NULL;
    ie_blob_buffer_t blob_buffer;
    DNNData output;
    OVContext *ctx = &task->ov_model->ctx;

    status = ie_infer_request_get_blob(request->infer_request, task->output_name, &output_blob);
    if (status != OK) {
        //incorrect output name
        char *model_output_name = NULL;
        char *all_output_names = NULL;
        size_t model_output_count = 0;
        av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
        status = ie_network_get_outputs_number(task->ov_model->network, &model_output_count);
        for (size_t i = 0; i < model_output_count; i++) {
            status = ie_network_get_output_name(task->ov_model->network, i, &model_output_name);
            APPEND_STRING(all_output_names, model_output_name)
        }
        av_log(ctx, AV_LOG_ERROR,
               "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
               task->output_name, all_output_names);
        return;
    }

    status = ie_blob_get_buffer(output_blob, &blob_buffer);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
        return;
    }

    status |= ie_blob_get_dims(output_blob, &dims);
    status |= ie_blob_get_precision(output_blob, &precision);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
        return;
    }

    output.channels = dims.dims[1];
    output.height = dims.dims[2];
    output.width = dims.dims[3];
    output.dt = precision_to_datatype(precision);
    output.data = blob_buffer.buffer;

    av_assert0(request->task_count <= dims.dims[0]);
    av_assert0(request->task_count >= 1);
    for (int i = 0; i < request->task_count; ++i) {
        task = request->tasks[i];
        if (task->do_ioproc) {
            if (task->ov_model->model->post_proc != NULL) {
                task->ov_model->model->post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
            } else {
                proc_from_dnn_to_frame(task->out_frame, &output, ctx);
            }
        } else {
            task->out_frame->width = output.width;
            task->out_frame->height = output.height;
        }
        task->done = 1;
        output.data = (uint8_t *)output.data
                      + output.width * output.height * output.channels * get_datatype_size(output.dt);
    }
    ie_blob_free(&output_blob);

    request->task_count = 0;

    if (task->async) {
        if (ff_safe_queue_push_back(task->ov_model->request_queue, request) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
            return;
        }
    }
}

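/* Lazily build the executable network: apply batch_size by reshaping the
 * network inputs, load the network onto the configured device, create one
 * inference request for sync use plus nireq requests (each able to hold
 * batch_size tasks) for async use, and set up the task queue. */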
static DNNReturnType init_model_ov(OVModel *ov_model)
{
    OVContext *ctx = &ov_model->ctx;
    IEStatusCode status;
    ie_available_devices_t a_dev;
    ie_config_t config = {NULL, NULL, NULL};
    char *all_dev_names = NULL;

    // batch size
    if (ctx->options.batch_size <= 0) {
        ctx->options.batch_size = 1;
    }

    if (ctx->options.batch_size > 1) {
        input_shapes_t input_shapes;
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        if (status != OK)
            goto err;
        for (int i = 0; i < input_shapes.shape_num; i++)
            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
        status = ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK)
            goto err;
    }

    status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
        status = ie_core_get_available_devices(ov_model->core, &a_dev);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
            goto err;
        }
        for (int i = 0; i < a_dev.num_devices; i++) {
            APPEND_STRING(all_dev_names, a_dev.devices[i])
        }
        av_log(ctx, AV_LOG_ERROR, "device %s may not be supported, all available devices are: \"%s\"\n",
               ctx->options.device_type, all_dev_names);
        goto err;
    }

    // create infer_request for sync execution
    status = ie_exec_network_create_infer_request(ov_model->exe_network, &ov_model->infer_request);
    if (status != OK)
        goto err;

    // create infer_requests for async execution
    if (ctx->options.nireq <= 0) {
        // the default value is a rough estimation
        ctx->options.nireq = av_cpu_count() / 2 + 1;
    }

    ov_model->request_queue = ff_safe_queue_create();
    if (!ov_model->request_queue) {
        goto err;
    }

    for (int i = 0; i < ctx->options.nireq; i++) {
        RequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            goto err;
        }

        status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
        if (status != OK) {
            av_freep(&item);
            goto err;
        }

        item->tasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->tasks));
        if (!item->tasks) {
            av_freep(&item);
            goto err;
        }
        item->task_count = 0;

        item->callback.completeCallBackFunc = infer_completion_callback;
        item->callback.args = item;
        if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
            av_freep(&item);
            goto err;
        }
    }

    ov_model->task_queue = ff_queue_create();
    if (!ov_model->task_queue) {
        goto err;
    }

    return DNN_SUCCESS;

err:
    ff_dnn_free_model_ov(&ov_model->model);
    return DNN_ERROR;
}

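/* Run one RequestItem. In async mode the request is only submitted once it
 * has collected batch_size tasks (otherwise it is pushed back to the front of
 * the request queue); in sync mode it infers immediately and invokes the
 * completion callback inline. */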
static DNNReturnType execute_model_ov(RequestItem *request)
{
    IEStatusCode status;
    DNNReturnType ret;
    TaskItem *task = request->tasks[0];
    OVContext *ctx = &task->ov_model->ctx;

    if (task->async) {
        if (request->task_count < ctx->options.batch_size) {
            if (ff_safe_queue_push_front(task->ov_model->request_queue, request) < 0) {
                av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
                return DNN_ERROR;
            }
            return DNN_SUCCESS;
        }
        ret = fill_model_input_ov(task->ov_model, request);
        if (ret != DNN_SUCCESS) {
            return ret;
        }
        status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
            return DNN_ERROR;
        }
        status = ie_infer_request_infer_async(request->infer_request);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
            return DNN_ERROR;
        }
        return DNN_SUCCESS;
    } else {
        ret = fill_model_input_ov(task->ov_model, request);
        if (ret != DNN_SUCCESS) {
            return ret;
        }
        status = ie_infer_request_infer(request->infer_request);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
            return DNN_ERROR;
        }
        infer_completion_callback(request);
        return task->done ? DNN_SUCCESS : DNN_ERROR;
    }
}

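/* get_input callback: look up input_name among the network inputs and report
 * its channels, height, width and data type; width/height are reported as -1
 * when the input is declared resizable. */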
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
{
    OVModel *ov_model = (OVModel *)model;
    OVContext *ctx = &ov_model->ctx;
    char *model_input_name = NULL;
    char *all_input_names = NULL;
    IEStatusCode status;
    size_t model_input_count = 0;
    dimensions_t dims;
    precision_e precision;
    int input_resizable = ctx->options.input_resizable;

    status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
        return DNN_ERROR;
    }

    for (size_t i = 0; i < model_input_count; i++) {
        status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
            return DNN_ERROR;
        }
        if (strcmp(model_input_name, input_name) == 0) {
            ie_network_name_free(&model_input_name);
            status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
            status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
            if (status != OK) {
                av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
                return DNN_ERROR;
            }

            input->channels = dims.dims[1];
            input->height = input_resizable ? -1 : dims.dims[2];
            input->width = input_resizable ? -1 : dims.dims[3];
            input->dt = precision_to_datatype(precision);
            return DNN_SUCCESS;
        } else {
            //incorrect input name
            APPEND_STRING(all_input_names, model_input_name)
        }

        ie_network_name_free(&model_input_name);
    }

    av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
    return DNN_ERROR;
}

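/* get_output callback: run a dummy inference (without io processing) with the
 * given input size, reshaping the network first if input_resizable is set, in
 * order to discover the corresponding output width and height. */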
static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height,
                                   const char *output_name, int *output_width, int *output_height)
{
    DNNReturnType ret;
    OVModel *ov_model = (OVModel *)model;
    OVContext *ctx = &ov_model->ctx;
    TaskItem task;
    RequestItem request;
    AVFrame *in_frame = av_frame_alloc();
    AVFrame *out_frame = NULL;
    TaskItem *ptask = &task;
    IEStatusCode status;
    input_shapes_t input_shapes;

    if (!in_frame) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
        return DNN_ERROR;
    }
    out_frame = av_frame_alloc();
    if (!out_frame) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
        av_frame_free(&in_frame);
        return DNN_ERROR;
    }
    in_frame->width = input_width;
    in_frame->height = input_height;

    if (ctx->options.input_resizable) {
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        input_shapes.shapes->shape.dims[2] = input_height;
        input_shapes.shapes->shape.dims[3] = input_width;
        status |= ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
            return DNN_ERROR;
        }
    }

    if (!ov_model->exe_network) {
        if (init_model_ov(ov_model) != DNN_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
            return DNN_ERROR;
        }
    }

    task.done = 0;
    task.do_ioproc = 0;
    task.async = 0;
    task.input_name = input_name;
    task.in_frame = in_frame;
    task.output_name = output_name;
    task.out_frame = out_frame;
    task.ov_model = ov_model;

    request.infer_request = ov_model->infer_request;
    request.task_count = 1;
    request.tasks = &ptask;

    ret = execute_model_ov(&request);
    *output_width = out_frame->width;
    *output_height = out_frame->height;

    av_frame_free(&out_frame);
    av_frame_free(&in_frame);
    return ret;
}

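/* Create the DNNModel: parse the backend options, create the OpenVINO core
 * and read the IR network from model_filename. The executable network itself
 * is created lazily in init_model_ov() on first execution. */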
DNNModel *ff_dnn_load_model_ov(const char *model_filename, const char *options, AVFilterContext *filter_ctx)
{
    DNNModel *model = NULL;
    OVModel *ov_model = NULL;
    OVContext *ctx = NULL;
    IEStatusCode status;

    model = av_mallocz(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    ov_model = av_mallocz(sizeof(OVModel));
    if (!ov_model) {
        av_freep(&model);
        return NULL;
    }
    model->model = (void *)ov_model;
    ov_model->model = model;
    ov_model->ctx.class = &dnn_openvino_class;
    ctx = &ov_model->ctx;

    //parse options
    av_opt_set_defaults(ctx);
    if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
        goto err;
    }

    status = ie_core_create("", &ov_model->core);
    if (status != OK)
        goto err;

    status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
    if (status != OK)
        goto err;

    model->get_input = &get_input_ov;
    model->get_output = &get_output_ov;
    model->options = options;
    model->filter_ctx = filter_ctx;

    return model;

err:
    ff_dnn_free_model_ov(&model);
    return NULL;
}

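/* Synchronous execution of a single frame pair on the dedicated sync
 * inference request; batch mode and multiple outputs are rejected here. */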
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, const char *input_name, AVFrame *in_frame,
                                      const char **output_names, uint32_t nb_output, AVFrame *out_frame)
{
    OVModel *ov_model = (OVModel *)model->model;
    OVContext *ctx = &ov_model->ctx;
    TaskItem task;
    RequestItem request;
    TaskItem *ptask = &task;

    if (!in_frame) {
        av_log(ctx, AV_LOG_ERROR, "in frame is NULL when execute model.\n");
        return DNN_ERROR;
    }

    if (!out_frame) {
        av_log(ctx, AV_LOG_ERROR, "out frame is NULL when execute model.\n");
        return DNN_ERROR;
    }

    if (nb_output != 1) {
        // currently, the filter does not need multiple outputs,
        // so we just postpone the support until we really need it.
        av_log(ctx, AV_LOG_ERROR, "do not support multiple outputs\n");
        return DNN_ERROR;
    }

    if (ctx->options.batch_size > 1) {
        av_log(ctx, AV_LOG_ERROR, "do not support batch mode for sync execution.\n");
        return DNN_ERROR;
    }

    if (!ov_model->exe_network) {
        if (init_model_ov(ov_model) != DNN_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
            return DNN_ERROR;
        }
    }

    task.done = 0;
    task.do_ioproc = 1;
    task.async = 0;
    task.input_name = input_name;
    task.in_frame = in_frame;
    task.output_name = output_names[0];
    task.out_frame = out_frame;
    task.ov_model = ov_model;

    request.infer_request = ov_model->infer_request;
    request.task_count = 1;
    request.tasks = &ptask;

    return execute_model_ov(&request);
}

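/* Asynchronous execution: the task is queued on the task queue and appended
 * to a free RequestItem; execute_model_ov() submits the request once it holds
 * batch_size tasks. A rough sketch of the expected caller loop (the tensor
 * name "x" and the downstream push helper are illustrative, not part of this
 * API):
 *
 *     ff_dnn_execute_model_async_ov(model, "x", in, outputs, 1, out);
 *     while (ff_dnn_get_async_result_ov(model, &in, &out) == DAST_SUCCESS)
 *         push_frames_downstream(in, out);   // hypothetical caller helper
 *     // at EOF, submit whatever is still batched:
 *     ff_dnn_flush_ov(model);
 */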
DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, const char *input_name, AVFrame *in_frame,
                                            const char **output_names, uint32_t nb_output, AVFrame *out_frame)
{
    OVModel *ov_model = (OVModel *)model->model;
    OVContext *ctx = &ov_model->ctx;
    RequestItem *request;
    TaskItem *task;

    if (!in_frame) {
        av_log(ctx, AV_LOG_ERROR, "in frame is NULL when async execute model.\n");
        return DNN_ERROR;
    }

    if (!out_frame) {
        av_log(ctx, AV_LOG_ERROR, "out frame is NULL when async execute model.\n");
        return DNN_ERROR;
    }

    task = av_malloc(sizeof(*task));
    if (!task) {
        av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
        return DNN_ERROR;
    }

    if (!ov_model->exe_network) {
        if (init_model_ov(ov_model) != DNN_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
            return DNN_ERROR;
        }
    }

    task->done = 0;
    task->do_ioproc = 1;
    task->async = 1;
    task->input_name = input_name;
    task->in_frame = in_frame;
    task->output_name = output_names[0];
    task->out_frame = out_frame;
    task->ov_model = ov_model;
    if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
        return DNN_ERROR;
    }

    request = ff_safe_queue_pop_front(ov_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return DNN_ERROR;
    }

    request->tasks[request->task_count++] = task;
    return execute_model_ov(request);
}

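/* Poll for a finished async task: returns DAST_EMPTY_QUEUE when nothing is
 * queued, DAST_NOT_READY while inference is still running, and DAST_SUCCESS
 * with the frame pair of the oldest completed task otherwise. */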
DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
{
    OVModel *ov_model = (OVModel *)model->model;
    TaskItem *task = ff_queue_peek_front(ov_model->task_queue);

    if (!task) {
        return DAST_EMPTY_QUEUE;
    }

    if (!task->done) {
        return DAST_NOT_READY;
    }

    *in = task->in_frame;
    *out = task->out_frame;
    ff_queue_pop_front(ov_model->task_queue);
    av_freep(&task);

    return DAST_SUCCESS;
}

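/* Flush: submit the partially filled request (fewer than batch_size tasks)
 * that may still be waiting, typically called when the input stream ends. */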
DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
{
    OVModel *ov_model = (OVModel *)model->model;
    OVContext *ctx = &ov_model->ctx;
    RequestItem *request;
    IEStatusCode status;
    DNNReturnType ret;

    request = ff_safe_queue_pop_front(ov_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return DNN_ERROR;
    }

    if (request->task_count == 0) {
        // no pending task needs to be flushed
        if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
            return DNN_ERROR;
        }
        return DNN_SUCCESS;
    }

    ret = fill_model_input_ov(ov_model, request);
    if (ret != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
        return ret;
    }
    status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
        return DNN_ERROR;
    }
    status = ie_infer_request_infer_async(request->infer_request);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
        return DNN_ERROR;
    }

    return DNN_SUCCESS;
}

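/* Free the model and everything owned by it: pending requests and tasks, the
 * queues, the inference requests, the executable network, the network and the
 * core. */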
void ff_dnn_free_model_ov(DNNModel **model)
{
    if (*model){
        OVModel *ov_model = (OVModel *)(*model)->model;
        while (ff_safe_queue_size(ov_model->request_queue) != 0) {
            RequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
            if (item && item->infer_request) {
                ie_infer_request_free(&item->infer_request);
            }
            av_freep(&item->tasks);
            av_freep(&item);
        }
        ff_safe_queue_destroy(ov_model->request_queue);

        while (ff_queue_size(ov_model->task_queue) != 0) {
            TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
            av_frame_free(&item->in_frame);
            av_frame_free(&item->out_frame);
            av_freep(&item);
        }
        ff_queue_destroy(ov_model->task_queue);

        if (ov_model->infer_request)
            ie_infer_request_free(&ov_model->infer_request);
        if (ov_model->exe_network)
            ie_exec_network_free(&ov_model->exe_network);
        if (ov_model->network)
            ie_network_free(&ov_model->network);
        if (ov_model->core)
            ie_core_free(&ov_model->core);
        av_freep(&ov_model);
        av_freep(model);
    }
}