FFmpeg
dnn_io_proc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
#include "dnn_io_proc.h"
#include "libavutil/imgutils.h"
#include "libswscale/swscale.h"
#include "libavutil/avassert.h"
#include "libavutil/detection_bbox.h"
26 
28 {
29  struct SwsContext *sws_ctx;
30  int bytewidth = av_image_get_linesize(frame->format, frame->width, 0);
31  if (output->dt != DNN_FLOAT) {
32  avpriv_report_missing_feature(log_ctx, "data type rather than DNN_FLOAT");
33  return DNN_ERROR;
34  }
35 
36  switch (frame->format) {
37  case AV_PIX_FMT_RGB24:
38  case AV_PIX_FMT_BGR24:
39  sws_ctx = sws_getContext(frame->width * 3,
40  frame->height,
42  frame->width * 3,
43  frame->height,
45  0, NULL, NULL, NULL);
46  if (!sws_ctx) {
47  av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion "
48  "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
49  av_get_pix_fmt_name(AV_PIX_FMT_GRAYF32), frame->width * 3, frame->height,
50  av_get_pix_fmt_name(AV_PIX_FMT_GRAY8), frame->width * 3, frame->height);
51  return DNN_ERROR;
52  }
53  sws_scale(sws_ctx, (const uint8_t *[4]){(const uint8_t *)output->data, 0, 0, 0},
54  (const int[4]){frame->width * 3 * sizeof(float), 0, 0, 0}, 0, frame->height,
55  (uint8_t * const*)frame->data, frame->linesize);
56  sws_freeContext(sws_ctx);
57  return DNN_SUCCESS;
58  case AV_PIX_FMT_GRAYF32:
59  av_image_copy_plane(frame->data[0], frame->linesize[0],
60  output->data, bytewidth,
61  bytewidth, frame->height);
62  return DNN_SUCCESS;
63  case AV_PIX_FMT_YUV420P:
64  case AV_PIX_FMT_YUV422P:
65  case AV_PIX_FMT_YUV444P:
66  case AV_PIX_FMT_YUV410P:
67  case AV_PIX_FMT_YUV411P:
68  case AV_PIX_FMT_GRAY8:
69  case AV_PIX_FMT_NV12:
70  sws_ctx = sws_getContext(frame->width,
71  frame->height,
73  frame->width,
74  frame->height,
76  0, NULL, NULL, NULL);
77  if (!sws_ctx) {
78  av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion "
79  "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
82  return DNN_ERROR;
83  }
84  sws_scale(sws_ctx, (const uint8_t *[4]){(const uint8_t *)output->data, 0, 0, 0},
85  (const int[4]){frame->width * sizeof(float), 0, 0, 0}, 0, frame->height,
86  (uint8_t * const*)frame->data, frame->linesize);
87  sws_freeContext(sws_ctx);
88  return DNN_SUCCESS;
89  default:
91  return DNN_ERROR;
92  }
93 
94  return DNN_SUCCESS;
95 }
96 
98 {
99  struct SwsContext *sws_ctx;
100  int bytewidth = av_image_get_linesize(frame->format, frame->width, 0);
101  if (input->dt != DNN_FLOAT) {
102  avpriv_report_missing_feature(log_ctx, "data type rather than DNN_FLOAT");
103  return DNN_ERROR;
104  }
105 
106  switch (frame->format) {
107  case AV_PIX_FMT_RGB24:
108  case AV_PIX_FMT_BGR24:
109  sws_ctx = sws_getContext(frame->width * 3,
110  frame->height,
112  frame->width * 3,
113  frame->height,
115  0, NULL, NULL, NULL);
116  if (!sws_ctx) {
117  av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion "
118  "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
119  av_get_pix_fmt_name(AV_PIX_FMT_GRAY8), frame->width * 3, frame->height,
120  av_get_pix_fmt_name(AV_PIX_FMT_GRAYF32),frame->width * 3, frame->height);
121  return DNN_ERROR;
122  }
123  sws_scale(sws_ctx, (const uint8_t **)frame->data,
124  frame->linesize, 0, frame->height,
125  (uint8_t * const*)(&input->data),
126  (const int [4]){frame->width * 3 * sizeof(float), 0, 0, 0});
127  sws_freeContext(sws_ctx);
128  break;
129  case AV_PIX_FMT_GRAYF32:
130  av_image_copy_plane(input->data, bytewidth,
131  frame->data[0], frame->linesize[0],
132  bytewidth, frame->height);
133  break;
134  case AV_PIX_FMT_YUV420P:
135  case AV_PIX_FMT_YUV422P:
136  case AV_PIX_FMT_YUV444P:
137  case AV_PIX_FMT_YUV410P:
138  case AV_PIX_FMT_YUV411P:
139  case AV_PIX_FMT_GRAY8:
140  case AV_PIX_FMT_NV12:
141  sws_ctx = sws_getContext(frame->width,
142  frame->height,
144  frame->width,
145  frame->height,
147  0, NULL, NULL, NULL);
148  if (!sws_ctx) {
149  av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion "
150  "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
153  return DNN_ERROR;
154  }
155  sws_scale(sws_ctx, (const uint8_t **)frame->data,
156  frame->linesize, 0, frame->height,
157  (uint8_t * const*)(&input->data),
158  (const int [4]){frame->width * sizeof(float), 0, 0, 0});
159  sws_freeContext(sws_ctx);
160  break;
161  default:
163  return DNN_ERROR;
164  }
165 
166  return DNN_SUCCESS;
167 }
168 
170 {
171  if (data->dt == DNN_UINT8 && data->order == DCO_BGR) {
172  return AV_PIX_FMT_BGR24;
173  }
174 
175  av_assert0(!"not supported yet.\n");
176  return AV_PIX_FMT_BGR24;
177 }
178 
179 DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
180 {
181  const AVPixFmtDescriptor *desc;
182  int offsetx[4], offsety[4];
183  uint8_t *bbox_data[4];
184  struct SwsContext *sws_ctx;
185  int linesizes[4];
186  enum AVPixelFormat fmt;
187  int left, top, width, height;
189  const AVDetectionBBox *bbox;
191  av_assert0(sd);
192 
193  header = (const AVDetectionBBoxHeader *)sd->data;
194  bbox = av_get_detection_bbox(header, bbox_index);
195 
196  left = bbox->x;
197  width = bbox->w;
198  top = bbox->y;
199  height = bbox->h;
200 
201  fmt = get_pixel_format(input);
202  sws_ctx = sws_getContext(width, height, frame->format,
203  input->width, input->height, fmt,
205  if (!sws_ctx) {
206  av_log(log_ctx, AV_LOG_ERROR, "Failed to create scale context for the conversion "
207  "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
209  av_get_pix_fmt_name(fmt), input->width, input->height);
210  return DNN_ERROR;
211  }
212 
213  if (av_image_fill_linesizes(linesizes, fmt, input->width) < 0) {
214  av_log(log_ctx, AV_LOG_ERROR, "unable to get linesizes with av_image_fill_linesizes");
215  sws_freeContext(sws_ctx);
216  return DNN_ERROR;
217  }
218 
219  desc = av_pix_fmt_desc_get(frame->format);
220  offsetx[1] = offsetx[2] = AV_CEIL_RSHIFT(left, desc->log2_chroma_w);
221  offsetx[0] = offsetx[3] = left;
222 
223  offsety[1] = offsety[2] = AV_CEIL_RSHIFT(top, desc->log2_chroma_h);
224  offsety[0] = offsety[3] = top;
225 
226  for (int k = 0; frame->data[k]; k++)
227  bbox_data[k] = frame->data[k] + offsety[k] * frame->linesize[k] + offsetx[k];
228 
229  sws_scale(sws_ctx, (const uint8_t *const *)&bbox_data, frame->linesize,
230  0, height,
231  (uint8_t *const *)(&input->data), linesizes);
232 
233  sws_freeContext(sws_ctx);
234 
235  return DNN_SUCCESS;
236 }
237 
239 {
240  struct SwsContext *sws_ctx;
241  int linesizes[4];
243  sws_ctx = sws_getContext(frame->width, frame->height, frame->format,
244  input->width, input->height, fmt,
246  if (!sws_ctx) {
247  av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion "
248  "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
249  av_get_pix_fmt_name(frame->format), frame->width, frame->height,
250  av_get_pix_fmt_name(fmt), input->width, input->height);
251  return DNN_ERROR;
252  }
253 
254  if (av_image_fill_linesizes(linesizes, fmt, input->width) < 0) {
255  av_log(log_ctx, AV_LOG_ERROR, "unable to get linesizes with av_image_fill_linesizes");
256  sws_freeContext(sws_ctx);
257  return DNN_ERROR;
258  }
259 
260  sws_scale(sws_ctx, (const uint8_t *const *)frame->data, frame->linesize, 0, frame->height,
261  (uint8_t *const *)(&input->data), linesizes);
262 
263  sws_freeContext(sws_ctx);
264  return DNN_SUCCESS;
265 }
266 
268 {
269  switch (func_type)
270  {
271  case DFT_PROCESS_FRAME:
274  return proc_from_frame_to_dnn_analytics(frame, input, log_ctx);
275  default:
276  avpriv_report_missing_feature(log_ctx, "model function type %d", func_type);
277  return DNN_ERROR;
278  }
279 }
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:616
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2541
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:51
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
data
const char data[16]
Definition: mxf.c:142
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:745
dnn_io_proc.h
proc_from_frame_to_dnn_analytics
static DNNReturnType proc_from_frame_to_dnn_analytics(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:238
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
av_image_copy_plane
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:371
SWS_FAST_BILINEAR
#define SWS_FAST_BILINEAR
Definition: swscale.h:58
ff_proc_from_dnn_to_frame
DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27
DNN_SUCCESS
@ DNN_SUCCESS
Definition: dnn_interface.h:33
get_pixel_format
static enum AVPixelFormat get_pixel_format(DNNData *data)
Definition: dnn_io_proc.c:169
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:181
width
#define width
av_image_fill_linesizes
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNReturnType
DNNReturnType
Definition: dnn_interface.h:33
DNNData
Definition: dnn_interface.h:58
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AV_PIX_FMT_GRAYF32
#define AV_PIX_FMT_GRAYF32
Definition: pixfmt.h:421
NULL
#define NULL
Definition: coverity.c:32
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
sws_getContext
struct SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
Definition: utils.c:1910
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
header
static const uint8_t header[24]
Definition: sdr2.c:67
height
#define height
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_proc_from_frame_to_dnn
DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, DNNFunctionType func_type, void *log_ctx)
Definition: dnn_io_proc.c:267
av_image_get_linesize
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane.
Definition: imgutils.c:76
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:54
ff_frame_to_dnn_classify
DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:179
DNN_ERROR
@ DNN_ERROR
Definition: dnn_interface.h:33
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2241
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
desc
const char * desc
Definition: libsvtav1.c:79
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:70
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
imgutils.h
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVDetectionBBox
Definition: detection_bbox.h:26
proc_from_frame_to_dnn_frameprocessing
static DNNReturnType proc_from_frame_to_dnn_frameprocessing(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:97
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
SwsContext
Definition: swscale_internal.h:283
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:53
detection_bbox.h
swscale.h
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:189
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2461