FFmpeg
rawdec.c
/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "bswapdsp.h"
#include "decode.h"
#include "get_bits.h"
#include "internal.h"
#include "raw.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
typedef struct RawVideoContext {
    AVClass *av_class;
    AVBufferRef *palette;
    int frame_size;       /* size of the frame in bytes */
    int flip;
    int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    int is_mono;
    int is_pal8;
    int is_nut_mono;
    int is_nut_pal8;
    int is_yuv2;
    int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16
    int tff;

    BswapDSPContext bbdsp;
    void *bitstream_buf;
    unsigned int bitstream_buf_size;
} RawVideoContext;

static const AVOption options[]={
{"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
{NULL}
};

static const AVClass rawdec_class = {
    .class_name = "rawdec",
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
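
/*
 * Editorial note (not part of the original source): the private "top" option
 * above can be supplied by an application when opening the decoder. A minimal
 * sketch, assuming "ctx" and "codec" refer to this decoder's context and
 * AVCodec:
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "top", "1", 0);      // force top-field-first
 *     avcodec_open2(ctx, codec, &opts);
 *     av_dict_free(&opts);
 */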

static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    ff_bswapdsp_init(&context->bbdsp);

    /* Derive the pixel format from the container fourcc and/or
     * bits_per_coded_sample. */
    if (   avctx->codec_tag == MKTAG('r','a','w',' ')
        || avctx->codec_tag == MKTAG('N','O','1','6'))
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
        avctx->pix_fmt = avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                             avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

    if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL)) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
#if FF_API_PSEUDOPAL
        if (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
            avpriv_set_systematic_pal2((uint32_t*)context->palette->data, avctx->pix_fmt);
#endif
        else {
            memset(context->palette->data, 0, AVPALETTE_SIZE);
            if (avctx->bits_per_coded_sample == 1)
                memset(context->palette->data, 0xff, 4);
        }
    }

    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG('c','y','u','v') ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W','R','A','W'))
        context->flip = 1;

    if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
        avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
        context->is_mono = 1;
    else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        context->is_pal8 = 1;

    if (avctx->codec_tag == MKTAG('B','1','W','0') ||
        avctx->codec_tag == MKTAG('B','0','W','1'))
        context->is_nut_mono = 1;
    else if (avctx->codec_tag == MKTAG('P','A','L',8))
        context->is_nut_pal8 = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    return 0;
}

static void flip(AVCodecContext *avctx, AVFrame *frame)
{
    frame->data[0]     += frame->linesize[0] * (avctx->height - 1);
    frame->linesize[0] *= -1;
}
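
/*
 * Editorial note (not part of the original source): flip() performs a vertical
 * flip without copying any pixels: data[0] is moved to the first byte of the
 * last row and linesize[0] is negated, so row y is still addressed as
 * data[0] + y * linesize[0] but walks upward through the buffer. For example,
 * a 4-row GRAY8 frame with linesize 16 ends up with data[0] advanced by 48
 * bytes and linesize[0] == -16.
 */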

/*
 * Scale sample to 16-bit resolution
 */
#define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16)))
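
/*
 * Editorial note (not part of the original source): SCALE16() widens a sample
 * by shifting it up and replicating its high bits into the freed low bits, so
 * that full scale maps to full scale. For bits = 10, SCALE16(x, 10) expands to
 * ((x) << 6) | ((x) >> 4), giving 0x000 -> 0x0000 and
 * 0x3FF -> 0xFFC0 | 0x3F = 0xFFFF.
 */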

/**
 * Scale buffer to 16 bits per coded sample resolution
 */
#define MKSCALE16(name, r16, w16) \
static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \
{ \
    int i; \
    if (!packed) { \
        for (i = 0; i + 1 < buf_size; i += 2) \
            w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \
    } else { \
        GetBitContext gb; \
        init_get_bits(&gb, buf, buf_size * 8); \
        for (i = 0; i < avctx->width * avctx->height; i++) { \
            int sample = get_bits(&gb, avctx->bits_per_coded_sample); \
            w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \
        } \
    } \
}

MKSCALE16(scale16be, AV_RB16, AV_WB16)
MKSCALE16(scale16le, AV_RL16, AV_WL16)
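
/*
 * Editorial note (not part of the original source): the two expansions above
 * generate scale16be() and scale16le(). raw_decode() selects one based on the
 * AV_PIX_FMT_FLAG_BE flag of the output pixel format whenever
 * bits_per_coded_sample is between 9 and 15. In the packed case (a 'BIT\0'
 * style codec tag) the samples are unpacked with a bit reader; otherwise each
 * sample already occupies 16 bits in the source buffer and is scaled word by
 * word.
 */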

static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc;
    RawVideoContext *context = avctx->priv_data;
    const uint8_t *buf       = avpkt->data;
    int buf_size             = avpkt->size;
    int linesize_align       = 4;
    int stride;
    int res, len;
    int need_copy;

    AVFrame *frame = data;

    if (avctx->width <= 0) {
        av_log(avctx, AV_LOG_ERROR, "width is not set\n");
        return AVERROR_INVALIDDATA;
    }
    if (avctx->height <= 0) {
        av_log(avctx, AV_LOG_ERROR, "height is not set\n");
        return AVERROR_INVALIDDATA;
    }

    if (context->is_nut_mono)
        stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
    else if (context->is_nut_pal8)
        stride = avctx->width;
    else
        stride = avpkt->size / avctx->height;

    av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride);

    if (stride == 0 || avpkt->size < stride * avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4 ||
         avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1 ||
         (avctx->bits_per_coded_sample == 0 && (context->is_nut_pal8 || context->is_mono)) ) &&
        (context->is_mono || context->is_pal8) &&
        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
         context->is_nut_mono || context->is_nut_pal8)) {
        context->is_1_2_4_8_bpp = 1;
        if (context->is_mono) {
            int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(row_bytes, 16) * 8,
                                                           avctx->height, 1);
        } else
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(avctx->width, 16),
                                                           avctx->height, 1);
    } else {
        context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
        context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
                                                       avctx->height, 1);
    }
    if (context->frame_size < 0)
        return context->frame_size;

    need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp;

    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;

    res = ff_decode_frame_props(avctx, frame);
    if (res < 0)
        return res;

    frame->pkt_pos      = avctx->internal->last_pkt_props->pos;
    frame->pkt_duration = avctx->internal->last_pkt_props->duration;

    if (context->tff >= 0) {
        frame->interlaced_frame = 1;
        frame->top_field_first  = context->tff;
    }

    if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return res;

    if (need_copy)
        frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size));
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

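    /*
     * Editorial note (not part of the original source): the branch below covers
     * palettized and monochrome content stored at 1, 2, 4 or 8 bits per pixel
     * with container-defined row strides. Sub-byte samples are expanded to one
     * byte per pixel (mono and 8 bpp data are copied byte for byte), and each
     * output row is advanced to the 16-aligned row length assumed by the
     * frame_size computed above.
     */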
    // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    if (context->is_1_2_4_8_bpp) {
        int i, j, row_pix = 0;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
        if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
            int pix_per_byte = context->is_mono ? 8 : 1;
            for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) {
                dst[j] = buf[i];
                row_pix += pix_per_byte;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 16 - (j % 16) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 4) {
            for (i = 0, j = 0; 2 * j + 1 < buf_size && i<avpkt->size; i++, j++) {
                dst[2 * j + 0] = buf[i] >> 4;
                dst[2 * j + 1] = buf[i] & 15;
                row_pix += 2;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 8 - (j % 8) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 2) {
            for (i = 0, j = 0; 4 * j + 3 < buf_size && i<avpkt->size; i++, j++) {
                dst[4 * j + 0] = buf[i] >> 6;
                dst[4 * j + 1] = buf[i] >> 4 & 3;
                dst[4 * j + 2] = buf[i] >> 2 & 3;
                dst[4 * j + 3] = buf[i] & 3;
                row_pix += 4;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 4 - (j % 4) - 1;
                    row_pix = 0;
                }
            }
        } else {
            av_assert0(avctx->bits_per_coded_sample == 1);
            for (i = 0, j = 0; 8 * j + 7 < buf_size && i<avpkt->size; i++, j++) {
                dst[8 * j + 0] = buf[i] >> 7;
                dst[8 * j + 1] = buf[i] >> 6 & 1;
                dst[8 * j + 2] = buf[i] >> 5 & 1;
                dst[8 * j + 3] = buf[i] >> 4 & 1;
                dst[8 * j + 4] = buf[i] >> 3 & 1;
                dst[8 * j + 5] = buf[i] >> 2 & 1;
                dst[8 * j + 6] = buf[i] >> 1 & 1;
                dst[8 * j + 7] = buf[i] & 1;
                row_pix += 8;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 2 - (j % 2) - 1;
                    row_pix = 0;
                }
            }
        }
        linesize_align = 16;
        buf = dst;
    } else if (context->is_lt_16bpp) {
        uint8_t *dst = frame->buf[0]->data;
        int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0);
        int swap   =  avctx->codec_tag >> 24;

        if (packed && swap) {
            av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size);
            if (!context->bitstream_buf)
                return AVERROR(ENOMEM);
            if (swap == 16)
                context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t*)buf, buf_size / 2);
            else if (swap == 32)
                context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t*)buf, buf_size / 4);
            else
                return AVERROR_INVALIDDATA;
            buf = context->bitstream_buf;
        }

        if (desc->flags & AV_PIX_FMT_FLAG_BE)
            scale16be(avctx, dst, buf, buf_size, packed);
        else
            scale16le(avctx, dst, buf, buf_size, packed);

        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, buf_size);
        buf = frame->buf[0]->data;
    }

    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
    if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
        av_buffer_unref(&frame->buf[0]);
        return AVERROR(EINVAL);
    }

    if ((res = av_image_fill_arrays(frame->data, frame->linesize,
                                    buf, avctx->pix_fmt,
                                    avctx->width, avctx->height, 1)) < 0) {
        av_buffer_unref(&frame->buf[0]);
        return res;
    }

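    /*
     * Editorial note (not part of the original source): for PAL8 the palette can
     * come from two places: an AV_PKT_DATA_PALETTE side-data block of exactly
     * AVPALETTE_SIZE bytes attached to the packet, or, for NUT PAL8 streams, raw
     * palette bytes appended to the packet after the width * height pixel
     * indices. The block below copies whichever is present into the
     * context-owned palette buffer and flags palette_has_changed.
     */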
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        buffer_size_t pal_size;
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                     &pal_size);
        int ret;

        if (pal && pal_size != AVPALETTE_SIZE) {
            av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", pal_size);
            pal = NULL;
        }

        if (!context->palette)
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        ret = av_buffer_make_writable(&context->palette);
        if (ret < 0) {
            av_buffer_unref(&frame->buf[0]);
            return ret;
        }

        if (pal) {
            memcpy(context->palette->data, pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        } else if (context->is_nut_pal8) {
            int vid_size = avctx->width * avctx->height;
            int pal_size = avpkt->size - vid_size;

            if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
                pal = avpkt->data + vid_size;
                memcpy(context->palette->data, pal, pal_size);
                frame->palette_has_changed = 1;
            }
        }
    }
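
    /*
     * Editorial note (not part of the original source): some producers pad each
     * row of these packed formats to the linesize_align boundary (4 bytes, or 16
     * after the sub-byte repacking above). When the packet is large enough to
     * contain that padding, the checks below widen frame->linesize accordingly
     * instead of misinterpreting the padding as pixel data; the NV12 case also
     * moves data[1] so the chroma plane starts after the padded luma plane.
     */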

    if ((avctx->pix_fmt==AV_PIX_FMT_RGB24    ||
         avctx->pix_fmt==AV_PIX_FMT_BGR24    ||
         avctx->pix_fmt==AV_PIX_FMT_GRAY8    ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555LE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOBLACK ||
         avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
        frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);

    if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
        FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
        int la0 = FFALIGN(frame->linesize[0], linesize_align);
        frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
        frame->linesize[0] = la0;
        frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
    }

    if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
        (desc->flags & FF_PSEUDOPAL)) {
        frame->buf[1] = av_buffer_ref(context->palette);
        if (!frame->buf[1]) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        frame->data[1] = frame->buf[1]->data;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, frame);

    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, frame->data[1], frame->data[2]);

    if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
        frame->data[1] = frame->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
        frame->data[2] = frame->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4;
    }

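    /*
     * Editorial note (not part of the original source): QuickTime 'yuv2' stores
     * the chroma of packed 4:2:2 as signed samples centred on 0, while
     * AV_PIX_FMT_YUYV422 expects unsigned chroma centred on 128; XORing every
     * chroma byte with 0x80 converts between the two representations.
     */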
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = frame->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += frame->linesize[0];
        }
    }
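
    /*
     * Editorial note (not part of the original source): 'b64a' is 16-bit
     * big-endian ARGB. Rotating each 64-bit pixel left by 16 bits
     * (v << 16 | v >> 48) moves the alpha component from the front to the back,
     * turning A R G B into the R G B A layout of AV_PIX_FMT_RGBA64BE.
     */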

    if (avctx->codec_tag == AV_RL32("b64a") &&
        avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
        uint8_t *dst = frame->data[0];
        uint64_t v;
        int x, y;
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x >> 3 < avctx->width; x += 8) {
                v = AV_RB64(&dst[x]);
                AV_WB64(&dst[x], v << 16 | v >> 48);
            }
            dst += frame->linesize[0];
        }
    }

    if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
        frame->interlaced_frame = 1;
        if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
            frame->top_field_first = 1;
    }

    *got_frame = 1;
    return buf_size;
}

static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    av_freep(&context->bitstream_buf);
    return 0;
}

AVCodec ff_rawvideo_decoder = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    .decode         = raw_decode,
    .priv_class     = &rawdec_class,
    .capabilities   = AV_CODEC_CAP_PARAM_CHANGE,
};
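
/*
 * Editorial note (not part of the original source): a minimal sketch of using
 * this decoder through the public libavcodec API. The dimensions, pixel format
 * and the pkt/frm variables are illustrative assumptions; raw video carries no
 * headers, so width, height and pix_fmt must be set before avcodec_open2().
 *
 *     AVCodec *codec      = avcodec_find_decoder(AV_CODEC_ID_RAWVIDEO);
 *     AVCodecContext *ctx = avcodec_alloc_context3(codec);
 *     ctx->width   = 320;
 *     ctx->height  = 240;
 *     ctx->pix_fmt = AV_PIX_FMT_YUV420P;
 *     if (avcodec_open2(ctx, codec, NULL) < 0)
 *         return;
 *     // each AVPacket is expected to hold exactly one uncompressed frame
 *     avcodec_send_packet(ctx, pkt);
 *     avcodec_receive_frame(ctx, frm);
 */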