FFmpeg
rawdec.c
/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "bswapdsp.h"
#include "codec_internal.h"
#include "decode.h"
#include "get_bits.h"
#include "internal.h"
#include "raw.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

typedef struct RawVideoContext {
    AVClass *av_class;
    AVBufferRef *palette;
    int frame_size;  /* size of the frame in bytes */
    int flip;
    int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    int is_mono;
    int is_pal8;
    int is_nut_mono;
    int is_nut_pal8;
    int is_yuv2;
    int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16
    int tff;

    BswapDSPContext bbdsp;
    void *bitstream_buf;
    unsigned int bitstream_buf_size;
} RawVideoContext;

static const AVOption options[]={
{"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
{NULL}
};

static const AVClass rawdec_class = {
    .class_name = "rawdec",
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

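/* The "top" option overrides the field order signalled by the container:
 * -1 (the default) leaves the frame flags untouched, 0 forces bottom field
 * first, 1 forces top field first (see the tff handling in raw_decode()). */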
static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    ff_bswapdsp_init(&context->bbdsp);

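    /* Derive the pixel format from the container: 'raw '/'NO16' and 'WRAW'
     * tags are looked up by bits_per_coded_sample in the AVI table, other
     * fourccs in the MOV table, and untagged streams fall back to the
     * generic raw-video table, all via avpriv_pix_fmt_find(). */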
    if (   avctx->codec_tag == MKTAG('r','a','w',' ')
        || avctx->codec_tag == MKTAG('N','O','1','6'))
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_AVI,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_MOV, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_RAW,
                                             avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

    if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
        memset(context->palette->data, 0, AVPALETTE_SIZE);
        if (avctx->bits_per_coded_sample == 1)
            memset(context->palette->data, 0xff, 4);
    }

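    /* A trailing "BottomUp" marker in the extradata and a few codec tags
     * indicate bottom-up storage; such frames are flipped vertically at the
     * end of raw_decode(). */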
    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG('c','y','u','v') ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W','R','A','W'))
        context->flip = 1;

    if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
        avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
        context->is_mono = 1;
    else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        context->is_pal8 = 1;

    if (avctx->codec_tag == MKTAG('B','1','W','0') ||
        avctx->codec_tag == MKTAG('B','0','W','1'))
        context->is_nut_mono = 1;
    else if (avctx->codec_tag == MKTAG('P','A','L',8))
        context->is_nut_pal8 = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    return 0;
}

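/* Present a bottom-up image as top-down without touching the pixels:
 * point data[0] at the last row and negate the stride. */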
static void flip(AVCodecContext *avctx, AVFrame *frame)
{
    frame->data[0] += frame->linesize[0] * (avctx->height - 1);
    frame->linesize[0] *= -1;
}

/*
 * Scale sample to 16-bit resolution
 */
#define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16)))
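/* SCALE16 widens a bits-wide sample to 16 bits by replicating its top bits
 * into the freed low bits, so full scale maps to full scale: for 10-bit
 * input it expands to ((x) << 6) | ((x) >> 4), turning 0x3FF into 0xFFFF. */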

/**
 * Scale buffer to 16 bits per coded sample resolution
 */
#define MKSCALE16(name, r16, w16) \
static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \
{ \
    int i; \
    if (!packed) { \
        for (i = 0; i + 1 < buf_size; i += 2) \
            w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \
    } else { \
        GetBitContext gb; \
        init_get_bits(&gb, buf, buf_size * 8); \
        for (i = 0; i < avctx->width * avctx->height; i++) { \
            int sample = get_bits(&gb, avctx->bits_per_coded_sample); \
            w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \
        } \
    } \
}

MKSCALE16(scale16be, AV_RB16, AV_WB16)
MKSCALE16(scale16le, AV_RL16, AV_WL16)

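/* raw_decode() turns one packet into one frame: it derives the input stride,
 * either wraps the packet buffer zero-copy or repacks/copies it into a frame
 * buffer, points frame->data/linesize into that buffer with
 * av_image_fill_arrays(), and finally applies per-fourcc fixups (palette,
 * plane swaps, chroma sign, flipping). */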
static int raw_decode(AVCodecContext *avctx, AVFrame *frame,
                      int *got_frame, AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc;
    RawVideoContext *context = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int linesize_align = 4;
    int stride;
    int res, len;
    int need_copy;

    if (avctx->width <= 0) {
        av_log(avctx, AV_LOG_ERROR, "width is not set\n");
        return AVERROR_INVALIDDATA;
    }
    if (avctx->height <= 0) {
        av_log(avctx, AV_LOG_ERROR, "height is not set\n");
        return AVERROR_INVALIDDATA;
    }

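    /* Bytes per input row: bit-packed rows for NUT mono, one byte per pixel
     * for NUT PAL8, otherwise inferred from the packet size. */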
    if (context->is_nut_mono)
        stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
    else if (context->is_nut_pal8)
        stride = avctx->width;
    else
        stride = avpkt->size / avctx->height;

    av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride);

    if (stride == 0 || avpkt->size < stride * avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

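    /* Compute the decoded frame size. For 1/2/4/8 bpp palettized or mono
     * input the width is padded so that every output row starts at a
     * 16-byte boundary, matching the row stepping of the unpacking loops
     * and the linesize_align of 16 used below. */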
    desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4 ||
         avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1 ||
         (avctx->bits_per_coded_sample == 0 && (context->is_nut_pal8 || context->is_mono)) ) &&
        (context->is_mono || context->is_pal8) &&
        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
                context->is_nut_mono || context->is_nut_pal8)) {
        context->is_1_2_4_8_bpp = 1;
        if (context->is_mono) {
            int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(row_bytes, 16) * 8,
                                                           avctx->height, 1);
        } else
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(avctx->width, 16),
                                                           avctx->height, 1);
    } else {
        context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
        context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
                                                       avctx->height, 1);
    }
    if (context->frame_size < 0)
        return context->frame_size;

    need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp;

    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->flags    |= AV_FRAME_FLAG_KEY;

    res = ff_decode_frame_props(avctx, frame);
    if (res < 0)
        return res;

    if (context->tff >= 0) {
        frame->flags &= ~AV_FRAME_FLAG_TOP_FIELD_FIRST;
        if (context->tff == 1)
            frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
    }

    if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return res;

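    /* If the packet is refcounted and needs no repacking, reference it
     * directly (zero copy); otherwise allocate a buffer large enough for
     * the unpacked frame and fill it below. */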
    if (need_copy)
        frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size));
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    if (context->is_1_2_4_8_bpp) {
        int i, j, row_pix = 0;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
        if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
            int pix_per_byte = context->is_mono ? 8 : 1;
            for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) {
                dst[j] = buf[i];
                row_pix += pix_per_byte;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 16 - (j % 16) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 4) {
            for (i = 0, j = 0; 2 * j + 1 < buf_size && i<avpkt->size; i++, j++) {
                dst[2 * j + 0] = buf[i] >> 4;
                dst[2 * j + 1] = buf[i] & 15;
                row_pix += 2;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 8 - (j % 8) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 2) {
            for (i = 0, j = 0; 4 * j + 3 < buf_size && i<avpkt->size; i++, j++) {
                dst[4 * j + 0] = buf[i] >> 6;
                dst[4 * j + 1] = buf[i] >> 4 & 3;
                dst[4 * j + 2] = buf[i] >> 2 & 3;
                dst[4 * j + 3] = buf[i] & 3;
                row_pix += 4;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 4 - (j % 4) - 1;
                    row_pix = 0;
                }
            }
        } else {
            av_assert0(avctx->bits_per_coded_sample == 1);
            for (i = 0, j = 0; 8 * j + 7 < buf_size && i<avpkt->size; i++, j++) {
                dst[8 * j + 0] = buf[i] >> 7;
                dst[8 * j + 1] = buf[i] >> 6 & 1;
                dst[8 * j + 2] = buf[i] >> 5 & 1;
                dst[8 * j + 3] = buf[i] >> 4 & 1;
                dst[8 * j + 4] = buf[i] >> 3 & 1;
                dst[8 * j + 5] = buf[i] >> 2 & 1;
                dst[8 * j + 6] = buf[i] >> 1 & 1;
                dst[8 * j + 7] = buf[i] & 1;
                row_pix += 8;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 2 - (j % 2) - 1;
                    row_pix = 0;
                }
            }
        }
        linesize_align = 16;
        buf = dst;
    } else if (context->is_lt_16bpp) {
        uint8_t *dst = frame->buf[0]->data;
        int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0);
        int swap = avctx->codec_tag >> 24;

        if (packed && swap) {
            av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size);
            if (!context->bitstream_buf)
                return AVERROR(ENOMEM);
            if (swap == 16)
                context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t*)buf, buf_size / 2);
            else if (swap == 32)
                context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t*)buf, buf_size / 4);
            else
                return AVERROR_INVALIDDATA;
            buf = context->bitstream_buf;
        }

        if (desc->flags & AV_PIX_FMT_FLAG_BE)
            scale16be(avctx, dst, buf, buf_size, packed);
        else
            scale16le(avctx, dst, buf, buf_size, packed);

        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, buf_size);
        buf = frame->buf[0]->data;
    }

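    /* For the AVID 'AV1x'/'AVup' tags the picture occupies the last
     * frame_size bytes of the packet, so skip any leading padding. */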
    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
    if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
        av_buffer_unref(&frame->buf[0]);
        return AVERROR(EINVAL);
    }

    if ((res = av_image_fill_arrays(frame->data, frame->linesize,
                                    buf, avctx->pix_fmt,
                                    avctx->width, avctx->height, 1)) < 0) {
        av_buffer_unref(&frame->buf[0]);
        return res;
    }

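    /* PAL8: keep the current palette in the decoder context. It is updated
     * from packet side data via ff_copy_palette(), or, for NUT 'PAL8'
     * streams, from the palette bytes appended after the image data. */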
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        int ret;

        if (!context->palette)
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        ret = av_buffer_make_writable(&context->palette);
        if (ret < 0) {
            av_buffer_unref(&frame->buf[0]);
            return ret;
        }

        if (ff_copy_palette(context->palette->data, avpkt, avctx)) {
#if FF_API_PALETTE_HAS_CHANGED
FF_DISABLE_DEPRECATION_WARNINGS
            frame->palette_has_changed = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        } else if (context->is_nut_pal8) {
            int vid_size = avctx->width * avctx->height;
            int pal_size = avpkt->size - vid_size;

            if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
                const uint8_t *pal = avpkt->data + vid_size;
                memcpy(context->palette->data, pal, pal_size);
#if FF_API_PALETTE_HAS_CHANGED
FF_DISABLE_DEPRECATION_WARNINGS
                frame->palette_has_changed = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            }
        }
    }

    if ((avctx->pix_fmt==AV_PIX_FMT_RGB24    ||
         avctx->pix_fmt==AV_PIX_FMT_BGR24    ||
         avctx->pix_fmt==AV_PIX_FMT_GRAY8    ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555LE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOBLACK ||
         avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
        frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);

    if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
        FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
        int la0 = FFALIGN(frame->linesize[0], linesize_align);
        frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
        frame->linesize[0] = la0;
        frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
    }

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) {
        frame->buf[1] = av_buffer_ref(context->palette);
        if (!frame->buf[1]) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        frame->data[1] = frame->buf[1]->data;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, frame);

    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, frame->data[1], frame->data[2]);

    if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
        frame->data[1] = frame->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
        frame->data[2] = frame->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4;
    }

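    /* Per-fourcc sample fixups: QuickTime 'yuv2' stores chroma as signed
     * values, so toggle the high bit of every chroma byte to get unsigned
     * YUYV; 'b64a' stores 16-bit ARGB, so rotate each 64-bit pixel left by
     * 16 bits to move alpha last and match AV_PIX_FMT_RGBA64BE. */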
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = frame->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += frame->linesize[0];
        }
    }

    if (avctx->codec_tag == AV_RL32("b64a") &&
        avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
        uint8_t *dst = frame->data[0];
        uint64_t v;
        int x, y;
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x >> 3 < avctx->width; x += 8) {
                v = AV_RB64(&dst[x]);
                AV_WB64(&dst[x], v << 16 | v >> 48);
            }
            dst += frame->linesize[0];
        }
    }

    if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
        frame->flags |= AV_FRAME_FLAG_INTERLACED;
        if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
            frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
    }

    *got_frame = 1;
    return buf_size;
}

static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    av_freep(&context->bitstream_buf);
    return 0;
}

const FFCodec ff_rawvideo_decoder = {
    .p.name         = "rawvideo",
    CODEC_LONG_NAME("raw video"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    FF_CODEC_DECODE_CB(raw_decode),
    .p.priv_class   = &rawdec_class,
    .p.capabilities = AV_CODEC_CAP_PARAM_CHANGE,
};
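
/*
 * Usage sketch: rawvideo packets carry no headers, so the caller has to
 * configure the codec context (dimensions and pixel format, or
 * codec_tag/bits_per_coded_sample for the lookup in raw_init_decoder())
 * before opening the decoder. A minimal sketch, assuming 640x480 yuv420p
 * input:
 *
 *     const AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_RAWVIDEO);
 *     AVCodecContext *ctx = avcodec_alloc_context3(dec);
 *     ctx->width   = 640;
 *     ctx->height  = 480;
 *     ctx->pix_fmt = AV_PIX_FMT_YUV420P;
 *     avcodec_open2(ctx, dec, NULL);
 *     // then feed packets with avcodec_send_packet() and fetch frames
 *     // with avcodec_receive_frame()
 */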