FFmpeg
dnn_backend_tf.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN tensorflow backend implementation.
24  */
25 
#include <limits.h>

#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavcodec/defs.h"
#include "../internal.h"
#include "dnn_io_proc.h"
#include "dnn_backend_common.h"
#include "safe_queue.h"
#include <tensorflow/c/c_api.h>
38 
39 typedef struct TFModel {
42  TF_Graph *graph;
43  TF_Session *session;
44  TF_Status *status;
48 } TFModel;
49 
50 /**
51  * Stores execution parameters for single
52  * call to the TensorFlow C API
53  */
54 typedef struct TFInferRequest {
55  TF_Output *tf_outputs;
56  TF_Tensor **output_tensors;
57  TF_Output *tf_input;
58  TF_Tensor *input_tensor;
60 
61 typedef struct TFRequestItem {
64  TF_Status *status;
67 
#define OFFSET(x) offsetof(TFOptions, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
// AVOptions understood by the TensorFlow backend. "sess_config" is a
// hex string ("0x...") holding a serialized session-config proto; see the
// comment in load_tf_model() and tools/python/tf_sess_config.py.
static const AVOption dnn_tensorflow_options[] = {
    { "sess_config", "config for SessionOptions", OFFSET(sess_config), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
74 
75 
76 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
77 static void infer_completion_callback(void *args);
78 static inline void destroy_request_item(TFRequestItem **arg);
79 
/* Deallocator installed on the TF_Buffer created in read_graph(); TensorFlow
 * calls it when the buffer is released. 'length' is unused because av_freep()
 * does not need the size. */
static void free_buffer(void *data, size_t length)
{
    av_freep(&data);
}
84 
/**
 * Free the contents of TensorFlow inference request.
 * It does not free the TFInferRequest instance.
 *
 * @param request pointer to TFInferRequest instance.
 * NULL pointer is allowed.
 */
static void tf_free_request(TFInferRequest *request)
{
    if (!request)
        return;
    if (request->input_tensor) {
        TF_DeleteTensor(request->input_tensor);
        request->input_tensor = NULL;
    }
    av_freep(&request->tf_input);
    av_freep(&request->tf_outputs);
    if (request->output_tensors) {
        // NOTE(review): sizeof(*request->output_tensors) and
        // sizeof(request->output_tensors[0]) are both sizeof(TF_Tensor *),
        // so nb_output is always 1 and only the first output tensor is
        // deleted — the remaining tensors of a multi-output model leak.
        // The real count (task->nb_output) is not stored in TFInferRequest;
        // consider recording it there. TODO confirm and fix.
        int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
        for (uint32_t i = 0; i < nb_output; ++i) {
            if (request->output_tensors[i]) {
                TF_DeleteTensor(request->output_tensors[i]);
                request->output_tensors[i] = NULL;
            }
        }
        av_freep(&request->output_tensors);
    }
}
113 
114 /**
115  * Create a TensorFlow inference request. All properties
116  * are initially unallocated and set as NULL.
117  *
118  * @return pointer to the allocated TFInferRequest instance.
119  */
121 {
122  TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
123  if (!infer_request) {
124  return NULL;
125  }
126  infer_request->tf_outputs = NULL;
127  infer_request->tf_input = NULL;
128  infer_request->input_tensor = NULL;
129  infer_request->output_tensors = NULL;
130  return infer_request;
131 }
132 
133 /**
134  * Start synchronous inference for the TensorFlow model.
135  *
136  * @param request pointer to the TFRequestItem for inference
137  * @retval 0 if execution is successful
138  * @retval AVERROR(EINVAL) if request is NULL
139  * @retval DNN_GENERIC_ERROR if execution fails
140  */
141 static int tf_start_inference(void *args)
142 {
143  TFRequestItem *request = args;
144  TFInferRequest *infer_request = request->infer_request;
145  LastLevelTaskItem *lltask = request->lltask;
146  TaskItem *task = lltask->task;
147  TFModel *tf_model = task->model;
148 
149  if (!request) {
150  av_log(tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
151  return AVERROR(EINVAL);
152  }
153 
154  TF_SessionRun(tf_model->session, NULL,
155  infer_request->tf_input, &infer_request->input_tensor, 1,
156  infer_request->tf_outputs, infer_request->output_tensors,
157  task->nb_output, NULL, 0, NULL,
158  request->status);
159  if (TF_GetCode(request->status) != TF_OK) {
160  av_log(tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
161  return DNN_GENERIC_ERROR;
162  }
163  return 0;
164 }
165 
166 /**
167  * Free the TFRequestItem completely.
168  *
169  * @param arg Address of the TFInferRequest instance.
170  */
171 static inline void destroy_request_item(TFRequestItem **arg) {
172  TFRequestItem *request;
173  if (!arg) {
174  return;
175  }
176  request = *arg;
177  tf_free_request(request->infer_request);
178  av_freep(&request->infer_request);
179  av_freep(&request->lltask);
180  TF_DeleteStatus(request->status);
182  av_freep(arg);
183 }
184 
185 static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
186 {
187  TFModel *tf_model = task->model;
188  DnnContext *ctx = tf_model->ctx;
189  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
190  if (!lltask) {
191  av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
192  return AVERROR(ENOMEM);
193  }
194  task->inference_todo = 1;
195  task->inference_done = 0;
196  lltask->task = task;
197  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
198  av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
199  av_freep(&lltask);
200  return AVERROR(ENOMEM);
201  }
202  return 0;
203 }
204 
205 static TF_Buffer *read_graph(const char *model_filename)
206 {
207  TF_Buffer *graph_buf;
208  unsigned char *graph_data = NULL;
209  AVIOContext *model_file_context;
210  long size, bytes_read;
211 
212  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
213  return NULL;
214  }
215 
216  size = avio_size(model_file_context);
217 
218  graph_data = av_malloc(size);
219  if (!graph_data){
220  avio_closep(&model_file_context);
221  return NULL;
222  }
223  bytes_read = avio_read(model_file_context, graph_data, size);
224  avio_closep(&model_file_context);
225  if (bytes_read != size){
226  av_freep(&graph_data);
227  return NULL;
228  }
229 
230  graph_buf = TF_NewBuffer();
231  graph_buf->data = graph_data;
232  graph_buf->length = size;
233  graph_buf->data_deallocator = free_buffer;
234 
235  return graph_buf;
236 }
237 
238 static TF_Tensor *allocate_input_tensor(const DNNData *input)
239 {
240  TF_DataType dt;
241  size_t size;
242  int64_t input_dims[4] = { 0 };
243 
244  input_dims[0] = 1;
245  input_dims[1] = input->dims[dnn_get_height_idx_by_layout(input->layout)];
246  input_dims[2] = input->dims[dnn_get_width_idx_by_layout(input->layout)];
247  input_dims[3] = input->dims[dnn_get_channel_idx_by_layout(input->layout)];
248  switch (input->dt) {
249  case DNN_FLOAT:
250  dt = TF_FLOAT;
251  size = sizeof(float);
252  break;
253  case DNN_UINT8:
254  dt = TF_UINT8;
255  size = 1;
256  break;
257  default:
258  av_assert0(!"should not reach here");
259  }
260 
261  return TF_AllocateTensor(dt, input_dims, 4,
262  input_dims[1] * input_dims[2] * input_dims[3] * size);
263 }
264 
265 static int get_input_tf(void *model, DNNData *input, const char *input_name)
266 {
267  TFModel *tf_model = model;
268  DnnContext *ctx = tf_model->ctx;
269  TF_Status *status;
270  TF_DataType dt;
271  int64_t dims[4];
272 
273  TF_Output tf_output;
274  tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
275  if (!tf_output.oper) {
276  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
277  return AVERROR(EINVAL);
278  }
279 
280  tf_output.index = 0;
281  dt = TF_OperationOutputType(tf_output);
282  switch (dt) {
283  case TF_FLOAT:
284  input->dt = DNN_FLOAT;
285  break;
286  case TF_UINT8:
287  input->dt = DNN_UINT8;
288  break;
289  default:
290  av_log(ctx, AV_LOG_ERROR, "Unsupported output type %d in model\n", dt);
291  return AVERROR(EINVAL);
292  }
293  input->order = DCO_RGB;
294 
295  status = TF_NewStatus();
296  TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
297  if (TF_GetCode(status) != TF_OK){
298  TF_DeleteStatus(status);
299  av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
300  return DNN_GENERIC_ERROR;
301  }
302  TF_DeleteStatus(status);
303 
304  // currently only NHWC is supported
305  av_assert0(dims[0] == 1 || dims[0] == -1);
306  for (int i = 0; i < 4; i++)
307  input->dims[i] = dims[i];
308  input->layout = DL_NHWC;
309 
310  return 0;
311 }
312 
313 static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
314  const char *output_name, int *output_width, int *output_height)
315 {
316  int ret;
317  TFModel *tf_model = model;
318  DnnContext *ctx = tf_model->ctx;
319  TaskItem task;
320  TFRequestItem *request;
321  DNNExecBaseParams exec_params = {
322  .input_name = input_name,
323  .output_names = &output_name,
324  .nb_output = 1,
325  .in_frame = NULL,
326  .out_frame = NULL,
327  };
328 
329  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
330  if (ret != 0) {
331  goto err;
332  }
333 
334  ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
335  if (ret != 0) {
336  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
337  goto err;
338  }
339 
340  request = ff_safe_queue_pop_front(tf_model->request_queue);
341  if (!request) {
342  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
343  ret = AVERROR(EINVAL);
344  goto err;
345  }
346 
347  ret = execute_model_tf(request, tf_model->lltask_queue);
348  *output_width = task.out_frame->width;
349  *output_height = task.out_frame->height;
350 
351 err:
352  av_frame_free(&task.out_frame);
353  av_frame_free(&task.in_frame);
354  return ret;
355 }
356 
#define SPACE_CHARS " \t\r\n"
/* Decode a hexadecimal string into bytes, skipping whitespace between
 * digits. When data is NULL, only the decoded length is computed. Decoding
 * stops at the first non-hex character. Returns the number of complete
 * bytes produced. */
static int hex_to_data(uint8_t *data, const char *p)
{
    int len = 0;
    int v = 1; /* nibble accumulator; bit 8 set once two nibbles are in */

    while (1) {
        int c;

        p += strspn(p, SPACE_CHARS);
        if (!*p)
            break;
        c = av_toupper((unsigned char) *p++);
        if (c >= '0' && c <= '9') {
            c -= '0';
        } else if (c >= 'A' && c <= 'F') {
            c = c - 'A' + 10;
        } else {
            break;
        }
        v = (v << 4) | c;
        if (v & 0x100) {
            if (data)
                data[len] = v;
            len++;
            v = 1;
        }
    }
    return len;
}
386 
387 static int load_tf_model(TFModel *tf_model, const char *model_filename)
388 {
389  DnnContext *ctx = tf_model->ctx;
390  TF_Buffer *graph_def;
391  TF_ImportGraphDefOptions *graph_opts;
392  TF_SessionOptions *sess_opts;
393  const TF_Operation *init_op;
394  uint8_t *sess_config = NULL;
395  int sess_config_length = 0;
396 
397  // prepare the sess config data
398  if (ctx->tf_option.sess_config != NULL) {
399  const char *config;
400  /*
401  tf_model->ctx.options.sess_config is hex to present the serialized proto
402  required by TF_SetConfig below, so we need to first generate the serialized
403  proto in a python script, tools/python/tf_sess_config.py is a script example
404  to generate the configs of sess_config.
405  */
406  if (strncmp(ctx->tf_option.sess_config, "0x", 2) != 0) {
407  av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
408  return AVERROR(EINVAL);
409  }
410  config = ctx->tf_option.sess_config + 2;
411  sess_config_length = hex_to_data(NULL, config);
412 
413  sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
414  if (!sess_config) {
415  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
416  return AVERROR(ENOMEM);
417  }
418  if (hex_to_data(sess_config, config) < 0) {
419  av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
420  return AVERROR(EINVAL);
421  }
422  }
423 
424  graph_def = read_graph(model_filename);
425  if (!graph_def){
426  av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
427  av_freep(&sess_config);
428  return AVERROR(EINVAL);
429  }
430  tf_model->graph = TF_NewGraph();
431  tf_model->status = TF_NewStatus();
432  graph_opts = TF_NewImportGraphDefOptions();
433  TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
434  TF_DeleteImportGraphDefOptions(graph_opts);
435  TF_DeleteBuffer(graph_def);
436  if (TF_GetCode(tf_model->status) != TF_OK){
437  av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
438  av_freep(&sess_config);
439  return DNN_GENERIC_ERROR;
440  }
441 
442  init_op = TF_GraphOperationByName(tf_model->graph, "init");
443  sess_opts = TF_NewSessionOptions();
444 
445  if (sess_config) {
446  TF_SetConfig(sess_opts, sess_config, sess_config_length,tf_model->status);
447  av_freep(&sess_config);
448  if (TF_GetCode(tf_model->status) != TF_OK) {
449  TF_DeleteSessionOptions(sess_opts);
450  av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
451  ctx->tf_option.sess_config);
452  return DNN_GENERIC_ERROR;
453  }
454  }
455 
456  tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
457  TF_DeleteSessionOptions(sess_opts);
458  if (TF_GetCode(tf_model->status) != TF_OK)
459  {
460  av_freep(&sess_config);
461  av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
462  return DNN_GENERIC_ERROR;
463  }
464 
465  // Run initialization operation with name "init" if it is present in graph
466  if (init_op){
467  TF_SessionRun(tf_model->session, NULL,
468  NULL, NULL, 0,
469  NULL, NULL, 0,
470  &init_op, 1, NULL, tf_model->status);
471  if (TF_GetCode(tf_model->status) != TF_OK)
472  {
473  av_freep(&sess_config);
474  av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
475  return DNN_GENERIC_ERROR;
476  }
477  }
478 
479  return 0;
480 }
481 
482 static void dnn_free_model_tf(DNNModel **model)
483 {
484  TFModel *tf_model;
485 
486  if (*model){
487  tf_model = (*model)->model;
488  while (ff_safe_queue_size(tf_model->request_queue) != 0) {
490  destroy_request_item(&item);
491  }
493 
494  while (ff_queue_size(tf_model->lltask_queue) != 0) {
496  av_freep(&item);
497  }
498  ff_queue_destroy(tf_model->lltask_queue);
499 
500  while (ff_queue_size(tf_model->task_queue) != 0) {
501  TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
502  av_frame_free(&item->in_frame);
503  av_frame_free(&item->out_frame);
504  av_freep(&item);
505  }
506  ff_queue_destroy(tf_model->task_queue);
507 
508  if (tf_model->graph){
509  TF_DeleteGraph(tf_model->graph);
510  }
511  if (tf_model->session){
512  TF_CloseSession(tf_model->session, tf_model->status);
513  TF_DeleteSession(tf_model->session, tf_model->status);
514  }
515  if (tf_model->status){
516  TF_DeleteStatus(tf_model->status);
517  }
518  av_freep(&tf_model);
519  av_freep(&model);
520  }
521 }
522 
524 {
525  DNNModel *model = NULL;
526  TFModel *tf_model = NULL;
527 
528  model = av_mallocz(sizeof(DNNModel));
529  if (!model){
530  return NULL;
531  }
532 
533  tf_model = av_mallocz(sizeof(TFModel));
534  if (!tf_model){
535  av_freep(&model);
536  return NULL;
537  }
538  model->model = tf_model;
539  tf_model->model = model;
540  tf_model->ctx = ctx;
541 
542  if (load_tf_model(tf_model, ctx->model_filename) != 0){
543  av_log(ctx, AV_LOG_ERROR, "Failed to load TensorFlow model: \"%s\"\n", ctx->model_filename);
544  goto err;
545  }
546 
547  if (ctx->nireq <= 0) {
548  ctx->nireq = av_cpu_count() / 2 + 1;
549  }
550 
551 #if !HAVE_PTHREAD_CANCEL
552  if (ctx->options.async) {
553  ctx->options.async = 0;
554  av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
555  }
556 #endif
557 
558  tf_model->request_queue = ff_safe_queue_create();
559  if (!tf_model->request_queue) {
560  goto err;
561  }
562 
563  for (int i = 0; i < ctx->nireq; i++) {
564  TFRequestItem *item = av_mallocz(sizeof(*item));
565  if (!item) {
566  goto err;
567  }
568  item->lltask = NULL;
570  if (!item->infer_request) {
571  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
572  av_freep(&item);
573  goto err;
574  }
575  item->status = TF_NewStatus();
578  item->exec_module.args = item;
579 
580  if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
581  destroy_request_item(&item);
582  goto err;
583  }
584  }
585 
586  tf_model->lltask_queue = ff_queue_create();
587  if (!tf_model->lltask_queue) {
588  goto err;
589  }
590 
591  tf_model->task_queue = ff_queue_create();
592  if (!tf_model->task_queue) {
593  goto err;
594  }
595 
596  model->get_input = &get_input_tf;
597  model->get_output = &get_output_tf;
598  model->filter_ctx = filter_ctx;
599  model->func_type = func_type;
600 
601  return model;
602 err:
603  dnn_free_model_tf(&model);
604  return NULL;
605 }
606 
607 static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
608  DNNData input = { 0 };
609  LastLevelTaskItem *lltask;
610  TaskItem *task;
611  TFInferRequest *infer_request = NULL;
612  DnnContext *ctx = tf_model->ctx;
613  int ret = 0;
614 
615  lltask = ff_queue_pop_front(tf_model->lltask_queue);
616  av_assert0(lltask);
617  task = lltask->task;
618  request->lltask = lltask;
619 
620  ret = get_input_tf(tf_model, &input, task->input_name);
621  if (ret != 0) {
622  goto err;
623  }
624 
625  infer_request = request->infer_request;
626  input.dims[1] = task->in_frame->height;
627  input.dims[2] = task->in_frame->width;
628 
629  infer_request->tf_input = av_malloc(sizeof(TF_Output));
630  if (!infer_request->tf_input) {
631  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
632  ret = AVERROR(ENOMEM);
633  goto err;
634  }
635 
636  infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
637  if (!infer_request->tf_input->oper){
638  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
640  goto err;
641  }
642  infer_request->tf_input->index = 0;
643 
644  infer_request->input_tensor = allocate_input_tensor(&input);
645  if (!infer_request->input_tensor){
646  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
647  ret = AVERROR(ENOMEM);
648  goto err;
649  }
650  input.data = (float *)TF_TensorData(infer_request->input_tensor);
651 
652  switch (tf_model->model->func_type) {
653  case DFT_PROCESS_FRAME:
654  if (task->do_ioproc) {
655  if (tf_model->model->frame_pre_proc != NULL) {
656  tf_model->model->frame_pre_proc(task->in_frame, &input, tf_model->model->filter_ctx);
657  } else {
659  }
660  }
661  break;
664  break;
665  default:
666  avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model->func_type);
667  break;
668  }
669 
670  infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
671  if (infer_request->tf_outputs == NULL) {
672  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
673  ret = AVERROR(ENOMEM);
674  goto err;
675  }
676 
677  infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
678  if (!infer_request->output_tensors) {
679  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
680  ret = AVERROR(ENOMEM);
681  goto err;
682  }
683 
684  for (int i = 0; i < task->nb_output; ++i) {
685  infer_request->output_tensors[i] = NULL;
686  infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
687  if (!infer_request->tf_outputs[i].oper) {
688  av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
690  goto err;
691  }
692  infer_request->tf_outputs[i].index = 0;
693  }
694 
695  return 0;
696 err:
697  tf_free_request(infer_request);
698  return ret;
699 }
700 
701 static void infer_completion_callback(void *args) {
702  TFRequestItem *request = args;
703  LastLevelTaskItem *lltask = request->lltask;
704  TaskItem *task = lltask->task;
705  DNNData *outputs;
706  TFInferRequest *infer_request = request->infer_request;
707  TFModel *tf_model = task->model;
708  DnnContext *ctx = tf_model->ctx;
709 
710  outputs = av_calloc(task->nb_output, sizeof(*outputs));
711  if (!outputs) {
712  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
713  goto err;
714  }
715 
716  for (uint32_t i = 0; i < task->nb_output; ++i) {
718  TF_Dim(infer_request->output_tensors[i], 1);
720  TF_Dim(infer_request->output_tensors[i], 2);
722  TF_Dim(infer_request->output_tensors[i], 3);
723  outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
724  outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
725  }
726  switch (tf_model->model->func_type) {
727  case DFT_PROCESS_FRAME:
728  //it only support 1 output if it's frame in & frame out
729  if (task->do_ioproc) {
730  if (tf_model->model->frame_post_proc != NULL) {
731  tf_model->model->frame_post_proc(task->out_frame, outputs, tf_model->model->filter_ctx);
732  } else {
734  }
735  } else {
736  task->out_frame->width =
738  task->out_frame->height =
740  }
741  break;
743  if (!tf_model->model->detect_post_proc) {
744  av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
745  return;
746  }
747  tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
748  break;
749  default:
750  av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
751  goto err;
752  }
753  task->inference_done++;
754 err:
755  tf_free_request(infer_request);
756  av_freep(&outputs);
757 
758  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
759  destroy_request_item(&request);
760  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
761  }
762 }
763 
764 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
765 {
766  TFModel *tf_model;
767  DnnContext *ctx;
768  LastLevelTaskItem *lltask;
769  TaskItem *task;
770  int ret = 0;
771 
772  if (ff_queue_size(lltask_queue) == 0) {
773  destroy_request_item(&request);
774  return 0;
775  }
776 
777  lltask = ff_queue_peek_front(lltask_queue);
778  task = lltask->task;
779  tf_model = task->model;
780  ctx = tf_model->ctx;
781 
782  ret = fill_model_input_tf(tf_model, request);
783  if (ret != 0) {
784  goto err;
785  }
786 
787  if (task->async) {
788  if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
789  goto err;
790  }
791  return 0;
792  }
793  else {
794  ret = tf_start_inference(request);
795  if (ret != 0) {
796  goto err;
797  }
798  infer_completion_callback(request);
799  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
800  }
801 err:
802  tf_free_request(request->infer_request);
803  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
804  destroy_request_item(&request);
805  }
806  dnn_free_model_tf(&tf_model->model);
807  return ret;
808 }
809 
/* Public execute entry point: validate parameters, allocate and fill a
 * TaskItem, enqueue it, then kick off execution on a free request. */
static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
{
    TFModel *tf_model = model->model;
    DnnContext *ctx = tf_model->ctx;
    TaskItem *task;
    TFRequestItem *request;
    int ret = 0;

    ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
    if (ret != 0) {
        return ret;
    }

    task = av_malloc(sizeof(*task));
    if (!task) {
        av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
        return AVERROR(ENOMEM);
    }

    ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->async, 1);
    if (ret != 0) {
        av_log(ctx, AV_LOG_ERROR, "Fill task with invalid parameter(s).\n");
        av_freep(&task);
        return ret;
    }

    if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
        return AVERROR(ENOMEM);
    }

    // NOTE(review): from here on 'task' is held by task_queue, yet the two
    // failure paths below free it while the queue still stores the pointer,
    // leaving a dangling entry that dnn_free_model_tf() will free again.
    // The Queue API visible here offers no way to remove the tail entry, so
    // this is flagged rather than fixed — verify against the Queue
    // implementation before changing ownership.
    ret = extract_lltask_from_task(task, tf_model->lltask_queue);
    if (ret != 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
        return ret;
    }

    request = ff_safe_queue_pop_front(tf_model->request_queue);
    if (!request) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return AVERROR(EINVAL);
    }
    return execute_model_tf(request, tf_model->lltask_queue);
}
857 
{
    // Delegate to the shared helper that scans task_queue for a completed
    // task and hands back its in/out frames.
    // NOTE(review): the signature line is missing from this view —
    // presumably static DNNAsyncStatusType dnn_get_result_tf(const DNNModel
    // *model, AVFrame **in, AVFrame **out); verify upstream.
    TFModel *tf_model = model->model;
    return ff_dnn_get_result_common(tf_model->task_queue, in, out);
}
863 
864 static int dnn_flush_tf(const DNNModel *model)
865 {
866  TFModel *tf_model = model->model;
867  DnnContext *ctx = tf_model->ctx;
868  TFRequestItem *request;
869  int ret;
870 
871  if (ff_queue_size(tf_model->lltask_queue) == 0) {
872  // no pending task need to flush
873  return 0;
874  }
875 
876  request = ff_safe_queue_pop_front(tf_model->request_queue);
877  if (!request) {
878  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
879  return AVERROR(EINVAL);
880  }
881 
882  ret = fill_model_input_tf(tf_model, request);
883  if (ret != 0) {
884  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
885  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
886  destroy_request_item(&request);
887  }
888  return ret;
889  }
890 
891  return ff_dnn_start_inference_async(ctx, &request->exec_module);
892 }
893 
895  .clazz = DNN_DEFINE_CLASS(dnn_tensorflow),
896  .load_model = dnn_load_model_tf,
897  .execute_model = dnn_execute_model_tf,
898  .get_result = dnn_get_result_tf,
899  .flush = dnn_flush_tf,
900  .free_model = dnn_free_model_tf,
901 };
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TFInferRequest
Stores execution parameters for single call to the TensorFlow C API.
Definition: dnn_backend_tf.c:54
TFInferRequest::tf_outputs
TF_Output * tf_outputs
Definition: dnn_backend_tf.c:55
execute_model_tf
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
Definition: dnn_backend_tf.c:764
FLAGS
#define FLAGS
Definition: dnn_backend_tf.c:69
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
TFModel::graph
TF_Graph * graph
Definition: dnn_backend_tf.c:42
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:55
DNNAsyncExecModule
Common Async Execution Mechanism for the DNN Backends.
Definition: dnn_backend_common.h:65
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
extract_lltask_from_task
static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
Definition: dnn_backend_tf.c:185
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:30
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
LastLevelTaskItem
Definition: dnn_backend_common.h:57
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AVFrame::width
int width
Definition: frame.h:446
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:357
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:108
TFInferRequest::input_tensor
TF_Tensor * input_tensor
Definition: dnn_backend_tf.c:58
data
const char data[16]
Definition: mxf.c:148
avio_open
int avio_open(AVIOContext **s, const char *filename, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:497
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:77
load_tf_model
static int load_tf_model(TFModel *tf_model, const char *model_filename)
Definition: dnn_backend_tf.c:387
dnn_io_proc.h
TFModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_tf.c:45
TaskItem
Definition: dnn_backend_common.h:43
DNNAsyncExecModule::callback
void(* callback)(void *args)
Completion Callback for the backend.
Definition: dnn_backend_common.h:77
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OFFSET
#define OFFSET(x)
Definition: dnn_backend_tf.c:68
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
destroy_request_item
static void destroy_request_item(TFRequestItem **arg)
Free the TFRequestItem completely.
Definition: dnn_backend_tf.c:171
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:97
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
dnn_get_width_idx_by_layout
static int dnn_get_width_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:194
TaskItem::model
void * model
Definition: dnn_backend_common.h:44
DnnContext
Definition: dnn_interface.h:141
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:52
dnn_load_model_tf
static DNNModel * dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
Definition: dnn_backend_tf.c:523
get_input_tf
static int get_input_tf(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_tf.c:265
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:62
SPACE_CHARS
#define SPACE_CHARS
Definition: dnn_backend_tf.c:357
Queue
Linear double-ended data structure.
Definition: queue.c:33
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_TF
@ DNN_TF
Definition: dnn_interface.h:35
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
fill_model_input_tf
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request)
Definition: dnn_backend_tf.c:607
TFRequestItem::exec_module
DNNAsyncExecModule exec_module
Definition: dnn_backend_tf.c:65
float
float
Definition: af_crystalizer.c:121
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:58
TFModel::ctx
DnnContext * ctx
Definition: dnn_backend_tf.c:40
read_graph
static TF_Buffer * read_graph(const char *model_filename)
Definition: dnn_backend_tf.c:205
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:65
DNNModule::clazz
const AVClass clazz
Definition: dnn_interface.h:174
dnn_tensorflow_options
static const AVOption dnn_tensorflow_options[]
Definition: dnn_backend_tf.c:70
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:156
ctx
AVFormatContext * ctx
Definition: movenc.c:49
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:52
arg
const char * arg
Definition: jacosubdec.c:67
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:182
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:423
NULL
#define NULL
Definition: coverity.c:32
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:111
tf_create_inference_request
static TFInferRequest * tf_create_inference_request(void)
Create a TensorFlow inference request.
Definition: dnn_backend_tf.c:120
ff_dnn_async_module_cleanup
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
Join the Async Execution thread and set module pointers to NULL.
Definition: dnn_backend_common.c:86
TFModel::task_queue
Queue * task_queue
Definition: dnn_backend_tf.c:47
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_tf.c:701
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:45
TFModel::status
TF_Status * status
Definition: dnn_backend_tf.c:44
tf_free_request
static void tf_free_request(TFInferRequest *request)
Free the contents of TensorFlow inference request.
Definition: dnn_backend_tf.c:92
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:211
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:53
cpu.h
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:113
size
int size
Definition: twinvq_data.h:10344
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:99
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
dnn_flush_tf
static int dnn_flush_tf(const DNNModel *model)
Definition: dnn_backend_tf.c:864
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
hex_to_data
static int hex_to_data(uint8_t *data, const char *p)
Definition: dnn_backend_tf.c:358
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
tf_start_inference
static int tf_start_inference(void *args)
Start synchronous inference for the TensorFlow model.
Definition: dnn_backend_tf.c:141
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:50
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
DNN_DEFINE_CLASS
#define DNN_DEFINE_CLASS(fname)
Definition: dnn_backend_common.h:39
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
ff_dnn_backend_tf
const DNNModule ff_dnn_backend_tf
Definition: dnn_backend_tf.c:894
dnn_execute_model_tf
static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_tf.c:810
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
TFRequestItem::status
TF_Status * status
Definition: dnn_backend_tf.c:64
TFInferRequest::output_tensors
TF_Tensor ** output_tensors
Definition: dnn_backend_tf.c:56
TFModel::session
TF_Session * session
Definition: dnn_backend_tf.c:43
TFRequestItem::infer_request
TFInferRequest * infer_request
Definition: dnn_backend_tf.c:62
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
DNNAsyncExecModule::start_inference
int(* start_inference)(void *request)
Synchronous inference function for the backend with corresponding request item as the argument.
Definition: dnn_backend_common.h:70
DNNAsyncExecModule::args
void * args
Argument for the execution functions.
Definition: dnn_backend_common.h:83
av_toupper
static av_const int av_toupper(int c)
Locale-independent conversion of ASCII characters to uppercase.
Definition: avstring.h:227
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
safe_queue.h
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:48
len
int len
Definition: vorbis_enc_data.h:426
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:311
TFInferRequest::tf_input
TF_Output * tf_input
Definition: dnn_backend_tf.c:57
ret
ret
Definition: filter_design.txt:187
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:102
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TFModel::model
DNNModel * model
Definition: dnn_backend_tf.c:41
TFModel
Definition: dnn_backend_tf.c:39
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
dnn_get_result_tf
static DNNAsyncStatusType dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_tf.c:858
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:46
AVFrame::height
int height
Definition: frame.h:446
status
ov_status_e status
Definition: dnn_backend_openvino.c:101
allocate_input_tensor
static TF_Tensor * allocate_input_tensor(const DNNData *input)
Definition: dnn_backend_tf.c:238
dnn_backend_common.h
TFRequestItem::lltask
LastLevelTaskItem * lltask
Definition: dnn_backend_tf.c:63
defs.h
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:136
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DCO_RGB
@ DCO_RGB
Definition: dnn_interface.h:42
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
ff_dnn_start_inference_async
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
Start asynchronous inference routine for the TensorFlow model on a detached thread.
Definition: dnn_backend_common.c:105
DNNModel
Definition: dnn_interface.h:93
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
mem.h
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:199
dnn_free_model_tf
static void dnn_free_model_tf(DNNModel **model)
Definition: dnn_backend_tf.c:482
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:47
dnn_get_channel_idx_by_layout
static int dnn_get_channel_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:204
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: avio.c:649
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
free_buffer
static void free_buffer(void *data, size_t length)
Definition: dnn_backend_tf.c:80
get_output_tf
static int get_output_tf(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_tf.c:313
DNNExecBaseParams
Definition: dnn_interface.h:76
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:50
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:104
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:249
TFRequestItem
Definition: dnn_backend_tf.c:61
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
TFModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_tf.c:46
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:51
DNNModule
Definition: dnn_interface.h:173
DNNModel::model
void * model
Definition: dnn_interface.h:95
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:42