FFmpeg
dnn_backend_native.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN native backend implementation.
24  */
25 
26 #include "dnn_backend_native.h"
27 #include "libavutil/avassert.h"
28 
/**
 * Bind the network input and pre-allocate every layer's output buffer.
 *
 * Walks the layer list once, propagating width/height/channel counts through
 * CONV and DEPTH_TO_SPACE layers, and (re)allocates each layer's output to
 * the resulting size. Layer 0 must be an INPUT layer; its output buffer is
 * shared with the caller through input->data.
 *
 * @param model         ConvolutionalNetwork handle (passed as void* to match
 *                      the DNNModel::set_input_output interface)
 * @param input         input descriptor; input->data is freed if set and
 *                      replaced with a freshly allocated float buffer owned
 *                      by the network (it doubles as layer 0's output)
 * @param input_name    unused in the native backend
 * @param output_names  unused in the native backend
 * @param nb_output     unused in the native backend
 * @return DNN_SUCCESS, or DNN_ERROR on bad topology, shape underflow or
 *         allocation failure
 */
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
    InputParams *input_params;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;
    int cur_width, cur_height, cur_channels;
    int32_t layer;

    if (network->layers_num <= 0 || network->layers[0].type != INPUT){
        return DNN_ERROR;
    }
    else{
        /* Record the input shape and seed the running dimensions. */
        input_params = (InputParams *)network->layers[0].params;
        input_params->width = cur_width = input->width;
        input_params->height = cur_height = input->height;
        input_params->channels = cur_channels = input->channels;
        /* Drop any buffer from a previous binding before reallocating. */
        if (input->data){
            av_freep(&input->data);
        }
        av_assert0(input->dt == DNN_FLOAT);
        /* Layer 0's output aliases the caller-visible input buffer. */
        network->layers[0].output = input->data = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
        if (!network->layers[0].output){
            return DNN_ERROR;
        }
    }

    for (layer = 1; layer < network->layers_num; ++layer){
        switch (network->layers[layer].type){
        case CONV:
            conv_params = (ConvolutionalParams *)network->layers[layer].params;
            /* Kernel depth must match the channels produced upstream. */
            if (conv_params->input_num != cur_channels){
                return DNN_ERROR;
            }
            cur_channels = conv_params->output_num;

            /* VALID padding shrinks the spatial extent by the kernel span. */
            if (conv_params->padding_method == VALID) {
                int pad_size = (conv_params->kernel_size - 1) * conv_params->dilation;
                cur_height -= pad_size;
                cur_width -= pad_size;
            }
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
            /* Channels must split evenly into block_size x block_size groups. */
            if (cur_channels % (depth_to_space_params->block_size * depth_to_space_params->block_size) != 0){
                return DNN_ERROR;
            }
            cur_channels = cur_channels / (depth_to_space_params->block_size * depth_to_space_params->block_size);
            cur_height *= depth_to_space_params->block_size;
            cur_width *= depth_to_space_params->block_size;
            break;
        default:
            return DNN_ERROR;
        }
        /* Discard a buffer from a previous binding (shape may have changed). */
        if (network->layers[layer].output){
            av_freep(&network->layers[layer].output);
        }

        /* VALID padding can underflow the dimensions for small inputs. */
        if (cur_height <= 0 || cur_width <= 0)
            return DNN_ERROR;

        network->layers[layer].output = av_malloc(cur_height * cur_width * cur_channels * sizeof(float));
        if (!network->layers[layer].output){
            return DNN_ERROR;
        }
    }

    return DNN_SUCCESS;
}
98 
99 // Loads model and its parameters that are stored in a binary file with following structure:
100 // layers_num,layer_type,layer_parameterss,layer_type,layer_parameters...
101 // For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
102 // For DEPTH_TO_SPACE layer: block_size
103 DNNModel *ff_dnn_load_model_native(const char *model_filename)
104 {
105  DNNModel *model = NULL;
106  ConvolutionalNetwork *network = NULL;
107  AVIOContext *model_file_context;
108  int file_size, dnn_size, kernel_size, i;
109  int32_t layer;
110  DNNLayerType layer_type;
111  ConvolutionalParams *conv_params;
112  DepthToSpaceParams *depth_to_space_params;
113 
114  model = av_malloc(sizeof(DNNModel));
115  if (!model){
116  return NULL;
117  }
118 
119  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
120  av_freep(&model);
121  return NULL;
122  }
123  file_size = avio_size(model_file_context);
124 
125  network = av_malloc(sizeof(ConvolutionalNetwork));
126  if (!network){
127  avio_closep(&model_file_context);
128  av_freep(&model);
129  return NULL;
130  }
131  model->model = (void *)network;
132 
133  network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
134  dnn_size = 4;
135 
136  network->layers = av_malloc(network->layers_num * sizeof(Layer));
137  if (!network->layers){
138  av_freep(&network);
139  avio_closep(&model_file_context);
140  av_freep(&model);
141  return NULL;
142  }
143 
144  for (layer = 0; layer < network->layers_num; ++layer){
145  network->layers[layer].output = NULL;
146  network->layers[layer].params = NULL;
147  }
148  network->layers[0].type = INPUT;
149  network->layers[0].params = av_malloc(sizeof(InputParams));
150  if (!network->layers[0].params){
151  avio_closep(&model_file_context);
152  ff_dnn_free_model_native(&model);
153  return NULL;
154  }
155 
156  for (layer = 1; layer < network->layers_num; ++layer){
157  layer_type = (int32_t)avio_rl32(model_file_context);
158  dnn_size += 4;
159  switch (layer_type){
160  case CONV:
161  conv_params = av_malloc(sizeof(ConvolutionalParams));
162  if (!conv_params){
163  avio_closep(&model_file_context);
164  ff_dnn_free_model_native(&model);
165  return NULL;
166  }
167  conv_params->dilation = (int32_t)avio_rl32(model_file_context);
168  conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
169  conv_params->activation = (int32_t)avio_rl32(model_file_context);
170  conv_params->input_num = (int32_t)avio_rl32(model_file_context);
171  conv_params->output_num = (int32_t)avio_rl32(model_file_context);
172  conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
173  kernel_size = conv_params->input_num * conv_params->output_num *
174  conv_params->kernel_size * conv_params->kernel_size;
175  dnn_size += 24 + (kernel_size + conv_params->output_num << 2);
176  if (dnn_size > file_size || conv_params->input_num <= 0 ||
177  conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
178  avio_closep(&model_file_context);
179  ff_dnn_free_model_native(&model);
180  return NULL;
181  }
182  conv_params->kernel = av_malloc(kernel_size * sizeof(float));
183  conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
184  if (!conv_params->kernel || !conv_params->biases){
185  avio_closep(&model_file_context);
186  ff_dnn_free_model_native(&model);
187  return NULL;
188  }
189  for (i = 0; i < kernel_size; ++i){
190  conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
191  }
192  for (i = 0; i < conv_params->output_num; ++i){
193  conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
194  }
195  network->layers[layer].type = CONV;
196  network->layers[layer].params = conv_params;
197  break;
198  case DEPTH_TO_SPACE:
199  depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
200  if (!depth_to_space_params){
201  avio_closep(&model_file_context);
202  ff_dnn_free_model_native(&model);
203  return NULL;
204  }
205  depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
206  dnn_size += 4;
207  network->layers[layer].type = DEPTH_TO_SPACE;
208  network->layers[layer].params = depth_to_space_params;
209  break;
210  default:
211  avio_closep(&model_file_context);
212  ff_dnn_free_model_native(&model);
213  return NULL;
214  }
215  }
216 
217  avio_closep(&model_file_context);
218 
219  if (dnn_size != file_size){
220  ff_dnn_free_model_native(&model);
221  return NULL;
222  }
223 
225 
226  return model;
227 }
228 
/* Clamp coordinate x into [0, w-1] (replicate-edge / clamp-to-edge padding).
 * Every macro parameter is fully parenthesized so expression arguments
 * (e.g. w = 1 << 3) expand correctly. */
#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? ((w) - 1) : (x)))
230 
231 static void convolve(const float *input, float *output, const ConvolutionalParams *conv_params, int width, int height)
232 {
233  int radius = conv_params->kernel_size >> 1;
234  int src_linesize = width * conv_params->input_num;
235  int filter_linesize = conv_params->kernel_size * conv_params->input_num;
236  int filter_size = conv_params->kernel_size * filter_linesize;
237  int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
238 
239  for (int y = pad_size; y < height - pad_size; ++y) {
240  for (int x = pad_size; x < width - pad_size; ++x) {
241  for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
242  output[n_filter] = conv_params->biases[n_filter];
243 
244  for (int ch = 0; ch < conv_params->input_num; ++ch) {
245  for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
246  for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
247  float input_pel;
248  if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
249  int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
250  int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
251  input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
252  } else {
253  int y_pos = y + (kernel_y - radius) * conv_params->dilation;
254  int x_pos = x + (kernel_x - radius) * conv_params->dilation;
255  input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
256  input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
257  }
258 
259 
260  output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
261  kernel_x * conv_params->input_num + ch];
262  }
263  }
264  }
265  switch (conv_params->activation){
266  case RELU:
267  output[n_filter] = FFMAX(output[n_filter], 0.0);
268  break;
269  case TANH:
270  output[n_filter] = 2.0f / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
271  break;
272  case SIGMOID:
273  output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
274  break;
275  case NONE:
276  break;
277  case LEAKY_RELU:
278  output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
279  }
280  }
281  output += conv_params->output_num;
282  }
283  }
284 }
285 
/**
 * Rearrange channel groups into spatial blocks (TensorFlow depth_to_space).
 *
 * Consumes the input sequentially (width x height pixels, 'channels' floats
 * each) and scatters every group of block_size*block_size sub-pixels into a
 * block_size x block_size patch of the output, which is (width*block_size) x
 * (height*block_size) pixels of channels/(block_size^2) floats.
 */
static void depth_to_space(const float *input, float *output, int block_size, int width, int height, int channels)
{
    const int out_channels = channels / (block_size * block_size);
    const int out_linesize = width * channels;        /* floats produced per input row */
    const int row_stride = out_linesize / block_size; /* one expanded output scanline  */
    const int col_stride = out_channels * block_size; /* one expanded output column    */
    const float *src = input;

    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            for (int dy = 0; dy < block_size; ++dy) {
                for (int dx = 0; dx < block_size; ++dx) {
                    float *dst = output + dy * row_stride + col * col_stride + dx * out_channels;
                    for (int c = 0; c < out_channels; ++c) {
                        dst[c] = src[c];
                    }
                    src += out_channels;
                }
            }
        }
        output += out_linesize;
    }
}
308 
{
    /* Run the network forward: layer 0's output buffer must already hold the
     * input (see set_input_output_native), every layer's output must be
     * allocated, and the propagated shape is returned in outputs[0]. */
    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
    int cur_width, cur_height, cur_channels;
    int32_t layer;
    InputParams *input_params;
    ConvolutionalParams *conv_params;
    DepthToSpaceParams *depth_to_space_params;

    if (network->layers_num <= 0 || network->layers[0].type != INPUT || !network->layers[0].output){
        return DNN_ERROR;
    }
    else{
        /* Seed the running shape from the bound input parameters. */
        input_params = (InputParams *)network->layers[0].params;
        cur_width = input_params->width;
        cur_height = input_params->height;
        cur_channels = input_params->channels;
    }

    for (layer = 1; layer < network->layers_num; ++layer){
        /* Output buffers are pre-allocated by set_input_output_native. */
        if (!network->layers[layer].output){
            return DNN_ERROR;
        }
        switch (network->layers[layer].type){
        case CONV:
            conv_params = (ConvolutionalParams *)network->layers[layer].params;
            convolve(network->layers[layer - 1].output, network->layers[layer].output, conv_params, cur_width, cur_height);
            cur_channels = conv_params->output_num;
            /* Mirror the shape arithmetic of set_input_output_native. */
            if (conv_params->padding_method == VALID) {
                int pad_size = (conv_params->kernel_size - 1) * conv_params->dilation;
                cur_height -= pad_size;
                cur_width -= pad_size;
            }
            break;
        case DEPTH_TO_SPACE:
            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
            depth_to_space(network->layers[layer - 1].output, network->layers[layer].output,
                           depth_to_space_params->block_size, cur_width, cur_height, cur_channels);
            cur_height *= depth_to_space_params->block_size;
            cur_width *= depth_to_space_params->block_size;
            cur_channels /= depth_to_space_params->block_size * depth_to_space_params->block_size;
            break;
        case INPUT:
            /* An INPUT layer is only legal at position 0. */
            return DNN_ERROR;
        }
    }

    // native mode does not support multiple outputs yet
    if (nb_output > 1)
        return DNN_ERROR;
    /* outputs[0].data aliases the last layer's buffer — owned by the network,
     * valid until the model is re-bound or freed. */
    outputs[0].data = network->layers[network->layers_num - 1].output;
    outputs[0].height = cur_height;
    outputs[0].width = cur_width;
    outputs[0].channels = cur_channels;

    return DNN_SUCCESS;
}
366 
{
    /* Release a model built by ff_dnn_load_model_native(): every layer's
     * output buffer and params (including CONV kernel/bias arrays), the
     * layer array, the network and the model itself. *model is set to NULL
     * by the final av_freep(); a NULL *model is a no-op. */
    ConvolutionalNetwork *network;
    ConvolutionalParams *conv_params;
    int32_t layer;

    if (*model)
    {
        network = (ConvolutionalNetwork *)(*model)->model;
        for (layer = 0; layer < network->layers_num; ++layer){
            av_freep(&network->layers[layer].output);
            /* Only CONV params own nested allocations. */
            if (network->layers[layer].type == CONV){
                conv_params = (ConvolutionalParams *)network->layers[layer].params;
                av_freep(&conv_params->kernel);
                av_freep(&conv_params->biases);
            }
            av_freep(&network->layers[layer].params);
        }
        av_freep(&network->layers);
        av_freep(&network);
        av_freep(model);
    }
}
InputParams
Definition: dnn_backend_native.h:54
NONE
@ NONE
Definition: af_afade.c:54
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ConvolutionalParams::kernel
float * kernel
Definition: dnn_backend_native.h:50
INPUT
@ INPUT
Definition: dnn_backend_native.h:33
channels
channels
Definition: aptx.c:30
DNNModel::set_input_output
DNNReturnType(* set_input_output)(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
Definition: dnn_interface.h:53
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:336
depth_to_space
static void depth_to_space(const float *input, float *output, int block_size, int width, int height, int channels)
Definition: dnn_backend_native.c:286
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
av_int2float
static av_always_inline float av_int2float(uint32_t i)
Reinterpret a 32-bit integer as a float.
Definition: intfloat.h:40
ConvolutionalNetwork::layers_num
int32_t layers_num
Definition: dnn_backend_native.h:65
DNN_SUCCESS
@ DNN_SUCCESS
Definition: dnn_interface.h:31
ConvolutionalNetwork
Definition: dnn_backend_native.h:63
SIGMOID
@ SIGMOID
Definition: dnn_backend_native.h:35
DNNLayerType
DNNLayerType
Definition: dnn_backend_native.h:33
VALID
@ VALID
Definition: dnn_backend_native.h:37
avassert.h
ConvolutionalParams::input_num
int32_t input_num
Definition: dnn_backend_native.h:46
width
#define width
TANH
@ TANH
Definition: dnn_backend_native.h:35
ff_dnn_load_model_native
DNNModel * ff_dnn_load_model_native(const char *model_filename)
Definition: dnn_backend_native.c:103
DNNInputData
Definition: dnn_interface.h:37
Layer::type
DNNLayerType type
Definition: dnn_backend_native.h:40
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNReturnType
DNNReturnType
Definition: dnn_interface.h:31
DNNData
Definition: dnn_interface.h:43
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
ConvolutionalParams::activation
DNNActivationFunc activation
Definition: dnn_backend_native.h:47
convolve
static void convolve(const float *input, float *output, const ConvolutionalParams *conv_params, int width, int height)
Definition: dnn_backend_native.c:231
InputParams::height
int height
Definition: dnn_backend_native.h:55
f
#define f(width, name)
Definition: cbs_vp9.c:255
int32_t
int32_t
Definition: audio_convert.c:194
if
if(ret)
Definition: filter_design.txt:179
ff_dnn_free_model_native
void ff_dnn_free_model_native(DNNModel **model)
Definition: dnn_backend_native.c:367
Layer::params
void * params
Definition: dnn_backend_native.h:42
NULL
#define NULL
Definition: coverity.c:32
CONV
@ CONV
Definition: dnn_backend_native.h:33
exp
int8_t exp
Definition: eval.c:72
ConvolutionalNetwork::layers
Layer * layers
Definition: dnn_backend_native.h:64
ConvolutionalParams::kernel_size
int32_t kernel_size
Definition: dnn_backend_native.h:46
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
avio_rl32
unsigned int avio_rl32(AVIOContext *s)
Definition: aviobuf.c:769
AVIOContext
Bytestream IO Context.
Definition: avio.h:161
Layer
Definition: dnn_backend_native.h:39
ff_dnn_execute_model_native
DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
Definition: dnn_backend_native.c:309
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:35
DepthToSpaceParams
Definition: dnn_backend_native.h:58
dnn_backend_native.h
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
RELU
@ RELU
Definition: dnn_backend_native.h:35
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1210
ConvolutionalParams::output_num
int32_t output_num
Definition: dnn_backend_native.h:46
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
SAME_CLAMP_TO_EDGE
@ SAME_CLAMP_TO_EDGE
Definition: dnn_backend_native.h:37
DNN_ERROR
@ DNN_ERROR
Definition: dnn_interface.h:31
DEPTH_TO_SPACE
@ DEPTH_TO_SPACE
Definition: dnn_backend_native.h:33
DepthToSpaceParams::block_size
int block_size
Definition: dnn_backend_native.h:59
set_input_output_native
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
Definition: dnn_backend_native.c:29
CLAMP_TO_EDGE
#define CLAMP_TO_EDGE(x, w)
Definition: dnn_backend_native.c:229
Layer::output
float * output
Definition: dnn_backend_native.h:41
avio_open
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1153
DNNModel
Definition: dnn_interface.h:48
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:654
LEAKY_RELU
@ LEAKY_RELU
Definition: dnn_backend_native.h:35
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
InputParams::channels
int channels
Definition: dnn_backend_native.h:55
ConvolutionalParams::padding_method
DNNConvPaddingParam padding_method
Definition: dnn_backend_native.h:48
InputParams::width
int width
Definition: dnn_backend_native.h:55
ConvolutionalParams
Definition: dnn_backend_native.h:45
ConvolutionalParams::dilation
int32_t dilation
Definition: dnn_backend_native.h:49
DNNModel::model
void * model
Definition: dnn_interface.h:50
ConvolutionalParams::biases
float * biases
Definition: dnn_backend_native.h:51