FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "hwaccel.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "jpegtables.h"
43 #include "mjpeg.h"
44 #include "mjpegdec.h"
45 #include "jpeglsdec.h"
46 #include "profiles.h"
47 #include "put_bits.h"
48 #include "tiff.h"
49 #include "exif.h"
50 #include "bytestream.h"
51 
52 
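/* Build a VLC decoding table from the raw DHT bit-length/value lists.
 * The stored symbols are biased by 16 for AC tables so that a decoded
 * symbol splits directly into run/size: (code >> 4) advances the
 * coefficient index by run + 1 and (code & 0xF) is the magnitude size.
 * The AC end-of-block symbol (value 0) is remapped to 16 * 256, which
 * pushes the index past 63 and terminates the block. */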
53 static int build_vlc(VLC *vlc, const uint8_t *bits_table,
54  const uint8_t *val_table, int nb_codes,
55  int use_static, int is_ac)
56 {
57  uint8_t huff_size[256] = { 0 };
58  uint16_t huff_code[256];
59  uint16_t huff_sym[256];
60  int i;
61 
62  av_assert0(nb_codes <= 256);
63 
64  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
65 
66  for (i = 0; i < 256; i++)
67  huff_sym[i] = i + 16 * is_ac;
68 
69  if (is_ac)
70  huff_sym[0] = 16 * 256;
71 
72  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
73  huff_code, 2, 2, huff_sym, 2, 2, use_static);
74 }
75 
76 static int init_default_huffman_tables(MJpegDecodeContext *s)
77 {
78  static const struct {
79  int class;
80  int index;
81  const uint8_t *bits;
82  const uint8_t *values;
83  int codes;
84  int length;
85  } ht[] = {
86  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
87  avpriv_mjpeg_val_dc, 12, 12 },
88  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
89  avpriv_mjpeg_val_dc, 12, 12 },
90  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
91  avpriv_mjpeg_val_ac_luminance, 251, 162 },
92  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
93  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
94  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
95  avpriv_mjpeg_val_ac_luminance, 251, 162 },
96  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
97  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
98  };
99  int i, ret;
100 
101  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
102  ret = build_vlc(&s->vlcs[ht[i].class][ht[i].index],
103  ht[i].bits, ht[i].values, ht[i].codes,
104  0, ht[i].class == 1);
105  if (ret < 0)
106  return ret;
107 
108  if (ht[i].class < 2) {
109  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
110  ht[i].bits + 1, 16);
111  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
112  ht[i].values, ht[i].length);
113  }
114  }
115 
116  return 0;
117 }
118 
119 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
120 {
121  s->buggy_avid = 1;
122  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
123  s->interlace_polarity = 1;
124  if (len > 14 && buf[12] == 2) /* 2 - PAL */
125  s->interlace_polarity = 0;
126  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
127  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
128 }
129 
130 static void init_idct(AVCodecContext *avctx)
131 {
132  MJpegDecodeContext *s = avctx->priv_data;
133 
134  ff_idctdsp_init(&s->idsp, avctx);
135  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
136  ff_zigzag_direct);
137 }
138 
139 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
140 {
141  MJpegDecodeContext *s = avctx->priv_data;
142  int ret;
143 
144  if (!s->picture_ptr) {
145  s->picture = av_frame_alloc();
146  if (!s->picture)
147  return AVERROR(ENOMEM);
148  s->picture_ptr = s->picture;
149  }
150 
151  s->avctx = avctx;
152  ff_blockdsp_init(&s->bdsp, avctx);
153  ff_hpeldsp_init(&s->hdsp, avctx->flags);
154  init_idct(avctx);
155  s->buffer_size = 0;
156  s->buffer = NULL;
157  s->start_code = -1;
158  s->first_picture = 1;
159  s->got_picture = 0;
160  s->org_height = avctx->coded_height;
161  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
162  avctx->colorspace = AVCOL_SPC_BT470BG;
163  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
164 
165  if ((ret = init_default_huffman_tables(s)) < 0)
166  return ret;
167 
168  if (s->extern_huff) {
169  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
170  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
171  return ret;
172  if (ff_mjpeg_decode_dht(s)) {
173  av_log(avctx, AV_LOG_ERROR,
174  "error using external huffman table, switching back to internal\n");
175  init_default_huffman_tables(s);
176  }
177  }
178  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
179  s->interlace_polarity = 1; /* bottom field first */
180  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
181  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
182  if (avctx->codec_tag == AV_RL32("MJPG"))
183  s->interlace_polarity = 1;
184  }
185 
186  if ( avctx->extradata_size > 8
187  && AV_RL32(avctx->extradata) == 0x2C
188  && AV_RL32(avctx->extradata+4) == 0x18) {
189  parse_avid(s, avctx->extradata, avctx->extradata_size);
190  }
191 
192  if (avctx->codec->id == AV_CODEC_ID_AMV)
193  s->flipped = 1;
194 
195  return 0;
196 }
197 
198 
199 /* quantize tables */
200 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
201 {
202  int len, index, i;
203 
204  len = get_bits(&s->gb, 16) - 2;
205 
206  if (8*len > get_bits_left(&s->gb)) {
207  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
208  return AVERROR_INVALIDDATA;
209  }
210 
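/* Each DQT table is one precision/index byte followed by 64 quantizer
 * values (1 byte each at 8-bit precision, 2 bytes at 16-bit), so every
 * iteration consumes at least 65 bytes of payload. */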
211  while (len >= 65) {
212  int pr = get_bits(&s->gb, 4);
213  if (pr > 1) {
214  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
215  return AVERROR_INVALIDDATA;
216  }
217  index = get_bits(&s->gb, 4);
218  if (index >= 4)
219  return -1;
220  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
221  /* read quant table */
222  for (i = 0; i < 64; i++) {
223  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
224  if (s->quant_matrixes[index][i] == 0) {
225  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v, code_max;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
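/* Each DHT table is a class/index byte, 16 per-length code counts and then
 * the symbol values; n is the total number of codes in the table. */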
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  code_max = 0;
274  for (i = 0; i < n; i++) {
275  v = get_bits(&s->gb, 8);
276  if (v > code_max)
277  code_max = v;
278  val_table[i] = v;
279  }
280  len -= n;
281 
282  /* build VLC and flush previous vlc if present */
283  ff_free_vlc(&s->vlcs[class][index]);
284  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
285  class, index, code_max + 1);
286  if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
287  code_max + 1, 0, class > 0)) < 0)
288  return ret;
289 
290  if (class > 0) {
291  ff_free_vlc(&s->vlcs[2][index]);
292  if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
293  code_max + 1, 0, 0)) < 0)
294  return ret;
295  }
296 
297  for (i = 0; i < 16; i++)
298  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
299  for (i = 0; i < 256; i++)
300  s->raw_huffman_values[class][index][i] = val_table[i];
301  }
302  return 0;
303 }
304 
305 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
306 {
307  int len, nb_components, i, width, height, bits, ret, size_change;
308  unsigned pix_fmt_id;
309  int h_count[MAX_COMPONENTS] = { 0 };
310  int v_count[MAX_COMPONENTS] = { 0 };
311 
312  s->cur_scan = 0;
313  memset(s->upscale_h, 0, sizeof(s->upscale_h));
314  memset(s->upscale_v, 0, sizeof(s->upscale_v));
315 
316  len = get_bits(&s->gb, 16);
317  bits = get_bits(&s->gb, 8);
318 
319  if (bits > 16 || bits < 1) {
320  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
321  return AVERROR_INVALIDDATA;
322  }
323 
324  if (s->avctx->bits_per_raw_sample != bits) {
325  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
326  s->avctx->bits_per_raw_sample = bits;
327  init_idct(s->avctx);
328  }
329  if (s->pegasus_rct)
330  bits = 9;
331  if (bits == 9 && !s->pegasus_rct)
332  s->rct = 1; // FIXME ugly
333 
334  if(s->lossless && s->avctx->lowres){
335  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
336  return -1;
337  }
338 
339  height = get_bits(&s->gb, 16);
340  width = get_bits(&s->gb, 16);
341 
342  // HACK for odd_height.mov
343  if (s->interlaced && s->width == width && s->height == height + 1)
344  height= s->height;
345 
346  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
347  if (av_image_check_size(width, height, 0, s->avctx) < 0)
348  return AVERROR_INVALIDDATA;
349  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
350  return AVERROR_INVALIDDATA;
351 
352  nb_components = get_bits(&s->gb, 8);
353  if (nb_components <= 0 ||
354  nb_components > MAX_COMPONENTS)
355  return -1;
356  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
357  if (nb_components != s->nb_components) {
358  av_log(s->avctx, AV_LOG_ERROR,
359  "nb_components changing in interlaced picture\n");
360  return AVERROR_INVALIDDATA;
361  }
362  }
363  if (s->ls && !(bits <= 8 || nb_components == 1)) {
365  "JPEG-LS that is not <= 8 "
366  "bits/component or 16-bit gray");
367  return AVERROR_PATCHWELCOME;
368  }
369  if (len != 8 + 3 * nb_components) {
370  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
371  return AVERROR_INVALIDDATA;
372  }
373 
374  s->nb_components = nb_components;
375  s->h_max = 1;
376  s->v_max = 1;
377  for (i = 0; i < nb_components; i++) {
378  /* component id */
379  s->component_id[i] = get_bits(&s->gb, 8) - 1;
380  h_count[i] = get_bits(&s->gb, 4);
381  v_count[i] = get_bits(&s->gb, 4);
382  /* compute hmax and vmax (only used in interleaved case) */
383  if (h_count[i] > s->h_max)
384  s->h_max = h_count[i];
385  if (v_count[i] > s->v_max)
386  s->v_max = v_count[i];
387  s->quant_index[i] = get_bits(&s->gb, 8);
388  if (s->quant_index[i] >= 4) {
389  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
390  return AVERROR_INVALIDDATA;
391  }
392  if (!h_count[i] || !v_count[i]) {
393  av_log(s->avctx, AV_LOG_ERROR,
394  "Invalid sampling factor in component %d %d:%d\n",
395  i, h_count[i], v_count[i]);
396  return AVERROR_INVALIDDATA;
397  }
398 
399  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
400  i, h_count[i], v_count[i],
401  s->component_id[i], s->quant_index[i]);
402  }
403  if ( nb_components == 4
404  && s->component_id[0] == 'C' - 1
405  && s->component_id[1] == 'M' - 1
406  && s->component_id[2] == 'Y' - 1
407  && s->component_id[3] == 'K' - 1)
408  s->adobe_transform = 0;
409 
410  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
411  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
412  return AVERROR_PATCHWELCOME;
413  }
414 
415 
416  /* if different size, realloc/alloc picture */
417  if (width != s->width || height != s->height || bits != s->bits ||
418  memcmp(s->h_count, h_count, sizeof(h_count)) ||
419  memcmp(s->v_count, v_count, sizeof(v_count))) {
420  size_change = 1;
421 
422  s->width = width;
423  s->height = height;
424  s->bits = bits;
425  memcpy(s->h_count, h_count, sizeof(h_count));
426  memcpy(s->v_count, v_count, sizeof(v_count));
427  s->interlaced = 0;
428  s->got_picture = 0;
429 
430  /* test interlaced mode */
431  if (s->first_picture &&
432  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
433  s->org_height != 0 &&
434  s->height < ((s->org_height * 3) / 4)) {
435  s->interlaced = 1;
436  s->bottom_field = s->interlace_polarity;
437  s->picture_ptr->interlaced_frame = 1;
438  s->picture_ptr->top_field_first = !s->interlace_polarity;
439  height *= 2;
440  }
441 
442  ret = ff_set_dimensions(s->avctx, width, height);
443  if (ret < 0)
444  return ret;
445 
446  s->first_picture = 0;
447  } else {
448  size_change = 0;
449  }
450 
451  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
452  if (s->progressive) {
453  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
454  return AVERROR_INVALIDDATA;
455  }
456  } else {
457  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
458  s->rgb = 1;
459  else if (!s->lossless)
460  s->rgb = 0;
461  /* XXX: not complete test ! */
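/* Pack the horizontal/vertical sampling factors of up to four components
 * into one 32-bit id, one nibble per factor, so the subsampling layout can
 * be matched against the known patterns below. */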
462  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
463  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
464  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
465  (s->h_count[3] << 4) | s->v_count[3];
466  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
467  /* NOTE we do not allocate pictures large enough for the possible
468  * padding of h/v_count being 4 */
469  if (!(pix_fmt_id & 0xD0D0D0D0))
470  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
471  if (!(pix_fmt_id & 0x0D0D0D0D))
472  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
473 
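/* Compare each component's sampling factor with the reference one; where
 * the reference factor is 2 and this one is 1, the decoded plane is
 * half-sized and is flagged for 2x upscaling in that direction after the
 * scan has been decoded. */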
474  for (i = 0; i < 8; i++) {
475  int j = 6 + (i&1) - (i&6);
476  int is = (pix_fmt_id >> (4*i)) & 0xF;
477  int js = (pix_fmt_id >> (4*j)) & 0xF;
478 
479  if (is == 1 && js != 2 && (i < 2 || i > 5))
480  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
481  if (is == 1 && js != 2 && (i < 2 || i > 5))
482  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
483 
484  if (is == 1 && js == 2) {
485  if (i & 1) s->upscale_h[j/2] = 1;
486  else s->upscale_v[j/2] = 1;
487  }
488  }
489 
490  switch (pix_fmt_id) {
491  case 0x11111100:
492  if (s->rgb)
493  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
494  else {
495  if ( s->adobe_transform == 0
496  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
497  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
498  } else {
499  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
500  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
501  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
502  }
503  }
504  av_assert0(s->nb_components == 3);
505  break;
506  case 0x11111111:
507  if (s->rgb)
508  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
509  else {
510  if (s->adobe_transform == 0 && s->bits <= 8) {
511  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
512  } else {
513  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
514  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
515  }
516  }
517  av_assert0(s->nb_components == 4);
518  break;
519  case 0x22111122:
520  case 0x22111111:
521  if (s->adobe_transform == 0 && s->bits <= 8) {
522  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
523  s->upscale_v[1] = s->upscale_v[2] = 1;
524  s->upscale_h[1] = s->upscale_h[2] = 1;
525  } else if (s->adobe_transform == 2 && s->bits <= 8) {
526  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
527  s->upscale_v[1] = s->upscale_v[2] = 1;
528  s->upscale_h[1] = s->upscale_h[2] = 1;
529  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
530  } else {
531  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
532  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
533  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
534  }
535  av_assert0(s->nb_components == 4);
536  break;
537  case 0x12121100:
538  case 0x22122100:
539  case 0x21211100:
540  case 0x22211200:
541  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
542  else
543  goto unk_pixfmt;
544  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
545  break;
546  case 0x22221100:
547  case 0x22112200:
548  case 0x11222200:
549  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
550  else
551  goto unk_pixfmt;
552  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
553  break;
554  case 0x11000000:
555  case 0x13000000:
556  case 0x14000000:
557  case 0x31000000:
558  case 0x33000000:
559  case 0x34000000:
560  case 0x41000000:
561  case 0x43000000:
562  case 0x44000000:
563  if(s->bits <= 8)
564  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
565  else
566  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
567  break;
568  case 0x12111100:
569  case 0x14121200:
570  case 0x14111100:
571  case 0x22211100:
572  case 0x22112100:
573  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
574  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
575  else
576  goto unk_pixfmt;
577  s->upscale_v[0] = s->upscale_v[1] = 1;
578  } else {
579  if (pix_fmt_id == 0x14111100)
580  s->upscale_v[1] = s->upscale_v[2] = 1;
581  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
582  else
583  goto unk_pixfmt;
584  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
585  }
586  break;
587  case 0x21111100:
588  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
589  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
590  else
591  goto unk_pixfmt;
592  s->upscale_h[0] = s->upscale_h[1] = 1;
593  } else {
594  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
595  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
596  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
597  }
598  break;
599  case 0x31111100:
600  if (s->bits > 8)
601  goto unk_pixfmt;
602  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
603  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
604  s->upscale_h[1] = s->upscale_h[2] = 2;
605  break;
606  case 0x22121100:
607  case 0x22111200:
608  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
609  else
610  goto unk_pixfmt;
611  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
612  break;
613  case 0x22111100:
614  case 0x23111100:
615  case 0x42111100:
616  case 0x24111100:
617  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
618  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
619  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
620  if (pix_fmt_id == 0x42111100) {
621  if (s->bits > 8)
622  goto unk_pixfmt;
623  s->upscale_h[1] = s->upscale_h[2] = 1;
624  } else if (pix_fmt_id == 0x24111100) {
625  if (s->bits > 8)
626  goto unk_pixfmt;
627  s->upscale_v[1] = s->upscale_v[2] = 1;
628  } else if (pix_fmt_id == 0x23111100) {
629  if (s->bits > 8)
630  goto unk_pixfmt;
631  s->upscale_v[1] = s->upscale_v[2] = 2;
632  }
633  break;
634  case 0x41111100:
635  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
636  else
637  goto unk_pixfmt;
638  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
639  break;
640  default:
641  unk_pixfmt:
642  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
643  memset(s->upscale_h, 0, sizeof(s->upscale_h));
644  memset(s->upscale_v, 0, sizeof(s->upscale_v));
645  return AVERROR_PATCHWELCOME;
646  }
647  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
648  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
649  return AVERROR_PATCHWELCOME;
650  }
651  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
652  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
653  return AVERROR_PATCHWELCOME;
654  }
655  if (s->ls) {
656  memset(s->upscale_h, 0, sizeof(s->upscale_h));
657  memset(s->upscale_v, 0, sizeof(s->upscale_v));
658  if (s->nb_components == 3) {
659  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
660  } else if (s->nb_components != 1) {
661  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
662  return AVERROR_PATCHWELCOME;
663  } else if (s->palette_index && s->bits <= 8)
664  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
665  else if (s->bits <= 8)
666  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
667  else
668  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
669  }
670 
671  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
672  if (!s->pix_desc) {
673  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
674  return AVERROR_BUG;
675  }
676 
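/* Offer the hardware pixel formats (when the corresponding hwaccels are
 * compiled in) together with the software format and let ff_get_format()
 * pick one; the chosen software format is remembered so the negotiation is
 * only redone when the frame geometry changes. */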
677  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
678  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
679  } else {
680  enum AVPixelFormat pix_fmts[] = {
681 #if CONFIG_MJPEG_NVDEC_HWACCEL
682  AV_PIX_FMT_CUDA,
683 #endif
684 #if CONFIG_MJPEG_VAAPI_HWACCEL
685  AV_PIX_FMT_VAAPI,
686 #endif
687  s->avctx->pix_fmt,
688  AV_PIX_FMT_NONE,
689  };
690  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
691  if (s->hwaccel_pix_fmt < 0)
692  return AVERROR(EINVAL);
693 
694  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
695  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
696  }
697 
698  if (s->avctx->skip_frame == AVDISCARD_ALL) {
699  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
700  s->picture_ptr->key_frame = 1;
701  s->got_picture = 1;
702  return 0;
703  }
704 
705  av_frame_unref(s->picture_ptr);
706  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
707  return -1;
708  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
709  s->picture_ptr->key_frame = 1;
710  s->got_picture = 1;
711 
712  for (i = 0; i < 4; i++)
713  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
714 
715  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
716  s->width, s->height, s->linesize[0], s->linesize[1],
717  s->interlaced, s->avctx->height);
718 
719  }
720 
721  if ((s->rgb && !s->lossless && !s->ls) ||
722  (!s->rgb && s->ls && s->nb_components > 1) ||
723  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
724  ) {
725  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
726  return AVERROR_PATCHWELCOME;
727  }
728 
729  /* totally blank picture as progressive JPEG will only add details to it */
730  if (s->progressive) {
731  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
732  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
733  for (i = 0; i < s->nb_components; i++) {
734  int size = bw * bh * s->h_count[i] * s->v_count[i];
735  av_freep(&s->blocks[i]);
736  av_freep(&s->last_nnz[i]);
737  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
738  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
739  if (!s->blocks[i] || !s->last_nnz[i])
740  return AVERROR(ENOMEM);
741  s->block_stride[i] = bw * s->h_count[i];
742  }
743  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
744  }
745 
746  if (s->avctx->hwaccel) {
747  s->hwaccel_picture_private =
748  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
749  if (!s->hwaccel_picture_private)
750  return AVERROR(ENOMEM);
751 
752  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
753  s->raw_image_buffer_size);
754  if (ret < 0)
755  return ret;
756  }
757 
758  return 0;
759 }
760 
761 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
762 {
763  int code;
764  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
765  if (code < 0 || code > 16) {
766  av_log(s->avctx, AV_LOG_WARNING,
767  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
768  0, dc_index, &s->vlcs[0][dc_index]);
769  return 0xfffff;
770  }
771 
772  if (code)
773  return get_xbits(&s->gb, code);
774  else
775  return 0;
776 }
777 
778 /* decode block and dequantize */
779 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
780  int dc_index, int ac_index, uint16_t *quant_matrix)
781 {
782  int code, i, j, level, val;
783 
784  /* DC coef */
785  val = mjpeg_decode_dc(s, dc_index);
786  if (val == 0xfffff) {
787  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
788  return AVERROR_INVALIDDATA;
789  }
790  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
791  val = av_clip_int16(val);
792  s->last_dc[component] = val;
793  block[0] = val;
794  /* AC coefs */
795  i = 0;
796  {OPEN_READER(re, &s->gb);
797  do {
798  UPDATE_CACHE(re, &s->gb);
799  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
800 
801  i += ((unsigned)code) >> 4;
802  code &= 0xf;
803  if (code) {
804  if (code > MIN_CACHE_BITS - 16)
805  UPDATE_CACHE(re, &s->gb);
806 
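/* Branchless sign extension: read 'code' magnitude bits from the cache and
 * apply the JPEG extend() rule, where a leading 0 bit denotes a negative
 * value. */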
807  {
808  int cache = GET_CACHE(re, &s->gb);
809  int sign = (~cache) >> 31;
810  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
811  }
812 
813  LAST_SKIP_BITS(re, &s->gb, code);
814 
815  if (i > 63) {
816  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
817  return AVERROR_INVALIDDATA;
818  }
819  j = s->scantable.permutated[i];
820  block[j] = level * quant_matrix[i];
821  }
822  } while (i < 63);
823  CLOSE_READER(re, &s->gb);}
824 
825  return 0;
826 }
827 
828 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
829  int component, int dc_index,
830  uint16_t *quant_matrix, int Al)
831 {
832  unsigned val;
833  s->bdsp.clear_block(block);
834  val = mjpeg_decode_dc(s, dc_index);
835  if (val == 0xfffff) {
836  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
837  return AVERROR_INVALIDDATA;
838  }
839  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
840  s->last_dc[component] = val;
841  block[0] = val;
842  return 0;
843 }
844 
845 /* decode block and dequantize - progressive JPEG version */
846 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
847  uint8_t *last_nnz, int ac_index,
848  uint16_t *quant_matrix,
849  int ss, int se, int Al, int *EOBRUN)
850 {
851  int code, i, j, val, run;
852  unsigned level;
853 
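/* EOBRUN counts how many following blocks have no further nonzero
 * coefficients in this spectral band; while it is nonzero the block is left
 * untouched and the counter is just decremented. */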
854  if (*EOBRUN) {
855  (*EOBRUN)--;
856  return 0;
857  }
858 
859  {
860  OPEN_READER(re, &s->gb);
861  for (i = ss; ; i++) {
862  UPDATE_CACHE(re, &s->gb);
863  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
864 
865  run = ((unsigned) code) >> 4;
866  code &= 0xF;
867  if (code) {
868  i += run;
869  if (code > MIN_CACHE_BITS - 16)
870  UPDATE_CACHE(re, &s->gb);
871 
872  {
873  int cache = GET_CACHE(re, &s->gb);
874  int sign = (~cache) >> 31;
875  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
876  }
877 
878  LAST_SKIP_BITS(re, &s->gb, code);
879 
880  if (i >= se) {
881  if (i == se) {
882  j = s->scantable.permutated[se];
883  block[j] = level * (quant_matrix[se] << Al);
884  break;
885  }
886  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
887  return AVERROR_INVALIDDATA;
888  }
889  j = s->scantable.permutated[i];
890  block[j] = level * (quant_matrix[i] << Al);
891  } else {
892  if (run == 0xF) {// ZRL - skip 15 coefficients
893  i += 15;
894  if (i >= se) {
895  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
896  return AVERROR_INVALIDDATA;
897  }
898  } else {
899  val = (1 << run);
900  if (run) {
901  UPDATE_CACHE(re, &s->gb);
902  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
903  LAST_SKIP_BITS(re, &s->gb, run);
904  }
905  *EOBRUN = val - 1;
906  break;
907  }
908  }
909  }
910  CLOSE_READER(re, &s->gb);
911  }
912 
913  if (i > *last_nnz)
914  *last_nnz = i;
915 
916  return 0;
917 }
918 
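/* Successive-approximation refinement helpers: REFINE_BIT reads one
 * correction bit and adds +/- (quant << Al) with the sign of the existing
 * coefficient; ZERO_RUN skips zero coefficients (refining the nonzero ones
 * it passes) until the coded run of zeros is consumed. */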
919 #define REFINE_BIT(j) { \
920  UPDATE_CACHE(re, &s->gb); \
921  sign = block[j] >> 15; \
922  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
923  ((quant_matrix[i] ^ sign) - sign) << Al; \
924  LAST_SKIP_BITS(re, &s->gb, 1); \
925 }
926 
927 #define ZERO_RUN \
928 for (; ; i++) { \
929  if (i > last) { \
930  i += run; \
931  if (i > se) { \
932  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
933  return -1; \
934  } \
935  break; \
936  } \
937  j = s->scantable.permutated[i]; \
938  if (block[j]) \
939  REFINE_BIT(j) \
940  else if (run-- == 0) \
941  break; \
942 }
943 
944 /* decode block and dequantize - progressive JPEG refinement pass */
945 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
946  uint8_t *last_nnz,
947  int ac_index, uint16_t *quant_matrix,
948  int ss, int se, int Al, int *EOBRUN)
949 {
950  int code, i = ss, j, sign, val, run;
951  int last = FFMIN(se, *last_nnz);
952 
953  OPEN_READER(re, &s->gb);
954  if (*EOBRUN) {
955  (*EOBRUN)--;
956  } else {
957  for (; ; i++) {
958  UPDATE_CACHE(re, &s->gb);
959  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
960 
961  if (code & 0xF) {
962  run = ((unsigned) code) >> 4;
963  UPDATE_CACHE(re, &s->gb);
964  val = SHOW_UBITS(re, &s->gb, 1);
965  LAST_SKIP_BITS(re, &s->gb, 1);
966  ZERO_RUN;
967  j = s->scantable.permutated[i];
968  val--;
969  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
970  if (i == se) {
971  if (i > *last_nnz)
972  *last_nnz = i;
973  CLOSE_READER(re, &s->gb);
974  return 0;
975  }
976  } else {
977  run = ((unsigned) code) >> 4;
978  if (run == 0xF) {
979  ZERO_RUN;
980  } else {
981  val = run;
982  run = (1 << run);
983  if (val) {
984  UPDATE_CACHE(re, &s->gb);
985  run += SHOW_UBITS(re, &s->gb, val);
986  LAST_SKIP_BITS(re, &s->gb, val);
987  }
988  *EOBRUN = run - 1;
989  break;
990  }
991  }
992  }
993 
994  if (i > *last_nnz)
995  *last_nnz = i;
996  }
997 
998  for (; i <= last; i++) {
999  j = s->scantable.permutated[i];
1000  if (block[j])
1001  REFINE_BIT(j)
1002  }
1003  CLOSE_READER(re, &s->gb);
1004 
1005  return 0;
1006 }
1007 #undef REFINE_BIT
1008 #undef ZERO_RUN
1009 
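/* At the end of a restart interval, look for an RSTn marker; when one is
 * found the DC predictors are reset and the marker is skipped. Returns
 * nonzero if a reset happened (used to clear the progressive EOB run). */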
1010 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1011 {
1012  int i;
1013  int reset = 0;
1014 
1015  if (s->restart_interval) {
1016  s->restart_count--;
1017  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1018  align_get_bits(&s->gb);
1019  for (i = 0; i < nb_components; i++) /* reset dc */
1020  s->last_dc[i] = (4 << s->bits);
1021  }
1022 
1023  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1024  /* skip RSTn */
1025  if (s->restart_count == 0) {
1026  if( show_bits(&s->gb, i) == (1 << i) - 1
1027  || show_bits(&s->gb, i) == 0xFF) {
1028  int pos = get_bits_count(&s->gb);
1029  align_get_bits(&s->gb);
1030  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1031  skip_bits(&s->gb, 8);
1032  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1033  for (i = 0; i < nb_components; i++) /* reset dc */
1034  s->last_dc[i] = (4 << s->bits);
1035  reset = 1;
1036  } else
1037  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1038  }
1039  }
1040  }
1041  return reset;
1042 }
1043 
1044 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1045 {
1046  int i, mb_x, mb_y;
1047  uint16_t (*buffer)[4];
1048  int left[4], top[4], topleft[4];
1049  const int linesize = s->linesize[0];
1050  const int mask = ((1 << s->bits) - 1) << point_transform;
1051  int resync_mb_y = 0;
1052  int resync_mb_x = 0;
1053 
1054  if (s->nb_components != 3 && s->nb_components != 4)
1055  return AVERROR_INVALIDDATA;
1056  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1057  return AVERROR_INVALIDDATA;
1058 
1059 
1060  s->restart_count = s->restart_interval;
1061 
1062  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,
1063  (unsigned)s->mb_width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1064  if (!s->ljpeg_buffer)
1065  return AVERROR(ENOMEM);
1066 
1067  buffer = s->ljpeg_buffer;
1068 
1069  for (i = 0; i < 4; i++)
1070  buffer[0][i] = 1 << (s->bits - 1);
1071 
1072  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1073  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1074 
1075  if (s->interlaced && s->bottom_field)
1076  ptr += linesize >> 1;
1077 
1078  for (i = 0; i < 4; i++)
1079  top[i] = left[i] = topleft[i] = buffer[0][i];
1080 
1081  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1082  int modified_predictor = predictor;
1083 
1084  if (get_bits_left(&s->gb) < 1) {
1085  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1086  return AVERROR_INVALIDDATA;
1087  }
1088 
1089  if (s->restart_interval && !s->restart_count){
1090  s->restart_count = s->restart_interval;
1091  resync_mb_x = mb_x;
1092  resync_mb_y = mb_y;
1093  for(i=0; i<4; i++)
1094  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1095  }
1096  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1097  modified_predictor = 1;
1098 
1099  for (i=0;i<nb_components;i++) {
1100  int pred, dc;
1101 
1102  topleft[i] = top[i];
1103  top[i] = buffer[mb_x][i];
1104 
1105  PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
1106 
1107  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1108  if(dc == 0xFFFFF)
1109  return -1;
1110 
1111  left[i] = buffer[mb_x][i] =
1112  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1113  }
1114 
1115  if (s->restart_interval && !--s->restart_count) {
1116  align_get_bits(&s->gb);
1117  skip_bits(&s->gb, 16); /* skip RSTn */
1118  }
1119  }
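/* Write the decoded row to the frame, undoing the reversible colour
 * transform (RCT / Pegasus RCT) when one was signalled; otherwise samples
 * are stored directly in component order. */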
1120  if (s->rct && s->nb_components == 4) {
1121  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1122  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1123  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1124  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1125  ptr[4*mb_x + 0] = buffer[mb_x][3];
1126  }
1127  } else if (s->nb_components == 4) {
1128  for(i=0; i<nb_components; i++) {
1129  int c= s->comp_index[i];
1130  if (s->bits <= 8) {
1131  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1132  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1133  }
1134  } else if(s->bits == 9) {
1135  return AVERROR_PATCHWELCOME;
1136  } else {
1137  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1138  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1139  }
1140  }
1141  }
1142  } else if (s->rct) {
1143  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1144  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1145  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1146  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1147  }
1148  } else if (s->pegasus_rct) {
1149  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1150  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1151  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1152  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1153  }
1154  } else {
1155  for(i=0; i<nb_components; i++) {
1156  int c= s->comp_index[i];
1157  if (s->bits <= 8) {
1158  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1159  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1160  }
1161  } else if(s->bits == 9) {
1162  return AVERROR_PATCHWELCOME;
1163  } else {
1164  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1165  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1166  }
1167  }
1168  }
1169  }
1170  }
1171  return 0;
1172 }
1173 
1174 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1175  int point_transform, int nb_components)
1176 {
1177  int i, mb_x, mb_y, mask;
1178  int bits= (s->bits+7)&~7;
1179  int resync_mb_y = 0;
1180  int resync_mb_x = 0;
1181 
1182  point_transform += bits - s->bits;
1183  mask = ((1 << s->bits) - 1) << point_transform;
1184 
1185  av_assert0(nb_components>=1 && nb_components<=4);
1186 
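/* Lossless JPEG DPCM: each sample is predicted from its left, top and
 * top-left neighbours according to the selected predictor, and the decoded
 * difference is added back. Samples on the top row or left column of a
 * resync region use fixed fallbacks since some neighbours are missing. */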
1187  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1188  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1189  if (get_bits_left(&s->gb) < 1) {
1190  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1191  return AVERROR_INVALIDDATA;
1192  }
1193  if (s->restart_interval && !s->restart_count){
1194  s->restart_count = s->restart_interval;
1195  resync_mb_x = mb_x;
1196  resync_mb_y = mb_y;
1197  }
1198 
1199  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1200  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1201  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1202  for (i = 0; i < nb_components; i++) {
1203  uint8_t *ptr;
1204  uint16_t *ptr16;
1205  int n, h, v, x, y, c, j, linesize;
1206  n = s->nb_blocks[i];
1207  c = s->comp_index[i];
1208  h = s->h_scount[i];
1209  v = s->v_scount[i];
1210  x = 0;
1211  y = 0;
1212  linesize= s->linesize[c];
1213 
1214  if(bits>8) linesize /= 2;
1215 
1216  for(j=0; j<n; j++) {
1217  int pred, dc;
1218 
1219  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1220  if(dc == 0xFFFFF)
1221  return -1;
1222  if ( h * mb_x + x >= s->width
1223  || v * mb_y + y >= s->height) {
1224  // Nothing to do
1225  } else if (bits<=8) {
1226  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1227  if(y==0 && toprow){
1228  if(x==0 && leftcol){
1229  pred= 1 << (bits - 1);
1230  }else{
1231  pred= ptr[-1];
1232  }
1233  }else{
1234  if(x==0 && leftcol){
1235  pred= ptr[-linesize];
1236  }else{
1237  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1238  }
1239  }
1240 
1241  if (s->interlaced && s->bottom_field)
1242  ptr += linesize >> 1;
1243  pred &= mask;
1244  *ptr= pred + ((unsigned)dc << point_transform);
1245  }else{
1246  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1247  if(y==0 && toprow){
1248  if(x==0 && leftcol){
1249  pred= 1 << (bits - 1);
1250  }else{
1251  pred= ptr16[-1];
1252  }
1253  }else{
1254  if(x==0 && leftcol){
1255  pred= ptr16[-linesize];
1256  }else{
1257  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1258  }
1259  }
1260 
1261  if (s->interlaced && s->bottom_field)
1262  ptr16 += linesize >> 1;
1263  pred &= mask;
1264  *ptr16= pred + ((unsigned)dc << point_transform);
1265  }
1266  if (++x == h) {
1267  x = 0;
1268  y++;
1269  }
1270  }
1271  }
1272  } else {
1273  for (i = 0; i < nb_components; i++) {
1274  uint8_t *ptr;
1275  uint16_t *ptr16;
1276  int n, h, v, x, y, c, j, linesize, dc;
1277  n = s->nb_blocks[i];
1278  c = s->comp_index[i];
1279  h = s->h_scount[i];
1280  v = s->v_scount[i];
1281  x = 0;
1282  y = 0;
1283  linesize = s->linesize[c];
1284 
1285  if(bits>8) linesize /= 2;
1286 
1287  for (j = 0; j < n; j++) {
1288  int pred;
1289 
1290  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1291  if(dc == 0xFFFFF)
1292  return -1;
1293  if ( h * mb_x + x >= s->width
1294  || v * mb_y + y >= s->height) {
1295  // Nothing to do
1296  } else if (bits<=8) {
1297  ptr = s->picture_ptr->data[c] +
1298  (linesize * (v * mb_y + y)) +
1299  (h * mb_x + x); //FIXME optimize this crap
1300  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1301 
1302  pred &= mask;
1303  *ptr = pred + ((unsigned)dc << point_transform);
1304  }else{
1305  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1306  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1307 
1308  pred &= mask;
1309  *ptr16= pred + ((unsigned)dc << point_transform);
1310  }
1311 
1312  if (++x == h) {
1313  x = 0;
1314  y++;
1315  }
1316  }
1317  }
1318  }
1319  if (s->restart_interval && !--s->restart_count) {
1320  align_get_bits(&s->gb);
1321  skip_bits(&s->gb, 16); /* skip RSTn */
1322  }
1323  }
1324  }
1325  return 0;
1326 }
1327 
1328 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1329  uint8_t *dst, const uint8_t *src,
1330  int linesize, int lowres)
1331 {
1332  switch (lowres) {
1333  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1334  break;
1335  case 1: copy_block4(dst, src, linesize, linesize, 4);
1336  break;
1337  case 2: copy_block2(dst, src, linesize, linesize, 2);
1338  break;
1339  case 3: *dst = *src;
1340  break;
1341  }
1342 }
1343 
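/* For bit depths that are not a multiple of 8 the decoded samples occupy
 * only the low bits; shift them up to fill the full 8- or 16-bit range of
 * the output pixel format. */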
1344 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1345 {
1346  int block_x, block_y;
1347  int size = 8 >> s->avctx->lowres;
1348  if (s->bits > 8) {
1349  for (block_y=0; block_y<size; block_y++)
1350  for (block_x=0; block_x<size; block_x++)
1351  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1352  } else {
1353  for (block_y=0; block_y<size; block_y++)
1354  for (block_x=0; block_x<size; block_x++)
1355  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1356  }
1357 }
1358 
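/* Decode one (possibly interleaved) DCT-based scan. The optional mb_bitmask
 * marks macroblocks that are unchanged and are copied from the reference
 * frame instead of being decoded. */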
1359 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1360  int Al, const uint8_t *mb_bitmask,
1361  int mb_bitmask_size,
1362  const AVFrame *reference)
1363 {
1364  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1365  uint8_t *data[MAX_COMPONENTS];
1366  const uint8_t *reference_data[MAX_COMPONENTS];
1367  int linesize[MAX_COMPONENTS];
1368  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1369  int bytes_per_pixel = 1 + (s->bits > 8);
1370 
1371  if (mb_bitmask) {
1372  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1373  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1374  return AVERROR_INVALIDDATA;
1375  }
1376  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1377  }
1378 
1379  s->restart_count = 0;
1380 
1381  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1382  &chroma_v_shift);
1383  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1384  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1385 
1386  for (i = 0; i < nb_components; i++) {
1387  int c = s->comp_index[i];
1388  data[c] = s->picture_ptr->data[c];
1389  reference_data[c] = reference ? reference->data[c] : NULL;
1390  linesize[c] = s->linesize[c];
1391  s->coefs_finished[c] |= 1;
1392  }
1393 
1394  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1395  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1396  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1397 
1398  if (s->restart_interval && !s->restart_count)
1399  s->restart_count = s->restart_interval;
1400 
1401  if (get_bits_left(&s->gb) < 0) {
1402  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1403  -get_bits_left(&s->gb));
1404  return AVERROR_INVALIDDATA;
1405  }
1406  for (i = 0; i < nb_components; i++) {
1407  uint8_t *ptr;
1408  int n, h, v, x, y, c, j;
1409  int block_offset;
1410  n = s->nb_blocks[i];
1411  c = s->comp_index[i];
1412  h = s->h_scount[i];
1413  v = s->v_scount[i];
1414  x = 0;
1415  y = 0;
1416  for (j = 0; j < n; j++) {
1417  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1418  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1419 
1420  if (s->interlaced && s->bottom_field)
1421  block_offset += linesize[c] >> 1;
1422  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1423  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1424  ptr = data[c] + block_offset;
1425  } else
1426  ptr = NULL;
1427  if (!s->progressive) {
1428  if (copy_mb) {
1429  if (ptr)
1430  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1431  linesize[c], s->avctx->lowres);
1432 
1433  } else {
1434  s->bdsp.clear_block(s->block);
1435  if (decode_block(s, s->block, i,
1436  s->dc_index[i], s->ac_index[i],
1437  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1438  av_log(s->avctx, AV_LOG_ERROR,
1439  "error y=%d x=%d\n", mb_y, mb_x);
1440  return AVERROR_INVALIDDATA;
1441  }
1442  if (ptr) {
1443  s->idsp.idct_put(ptr, linesize[c], s->block);
1444  if (s->bits & 7)
1445  shift_output(s, ptr, linesize[c]);
1446  }
1447  }
1448  } else {
1449  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1450  (h * mb_x + x);
1451  int16_t *block = s->blocks[c][block_idx];
1452  if (Ah)
1453  block[0] += get_bits1(&s->gb) *
1454  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1455  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1456  s->quant_matrixes[s->quant_sindex[i]],
1457  Al) < 0) {
1458  av_log(s->avctx, AV_LOG_ERROR,
1459  "error y=%d x=%d\n", mb_y, mb_x);
1460  return AVERROR_INVALIDDATA;
1461  }
1462  }
1463  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1464  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1465  mb_x, mb_y, x, y, c, s->bottom_field,
1466  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1467  if (++x == h) {
1468  x = 0;
1469  y++;
1470  }
1471  }
1472  }
1473 
1474  handle_rstn(s, nb_components);
1475  }
1476  }
1477  return 0;
1478 }
1479 
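/* Progressive AC scan: ss/se select the spectral band and Ah/Al the
 * successive-approximation bit position; a first pass (Ah == 0) codes new
 * coefficients while refinement passes add one bit of precision. */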
1480 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1481  int se, int Ah, int Al)
1482 {
1483  int mb_x, mb_y;
1484  int EOBRUN = 0;
1485  int c = s->comp_index[0];
1486  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1487 
1488  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1489  if (se < ss || se > 63) {
1490  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1491  return AVERROR_INVALIDDATA;
1492  }
1493 
1494  // s->coefs_finished is a bitmask for coefficients coded
1495  // ss and se are parameters telling start and end coefficients
1496  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1497 
1498  s->restart_count = 0;
1499 
1500  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1501  int block_idx = mb_y * s->block_stride[c];
1502  int16_t (*block)[64] = &s->blocks[c][block_idx];
1503  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1504  if (get_bits_left(&s->gb) <= 0) {
1505  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1506  return AVERROR_INVALIDDATA;
1507  }
1508  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1509  int ret;
1510  if (s->restart_interval && !s->restart_count)
1511  s->restart_count = s->restart_interval;
1512 
1513  if (Ah)
1514  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1515  quant_matrix, ss, se, Al, &EOBRUN);
1516  else
1517  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1518  quant_matrix, ss, se, Al, &EOBRUN);
1519 
1520  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1521  ret = AVERROR_INVALIDDATA;
1522  if (ret < 0) {
1523  av_log(s->avctx, AV_LOG_ERROR,
1524  "error y=%d x=%d\n", mb_y, mb_x);
1525  return AVERROR_INVALIDDATA;
1526  }
1527 
1528  if (handle_rstn(s, 0))
1529  EOBRUN = 0;
1530  }
1531  }
1532  return 0;
1533 }
1534 
1535 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1536 {
1537  int mb_x, mb_y;
1538  int c;
1539  const int bytes_per_pixel = 1 + (s->bits > 8);
1540  const int block_size = s->lossless ? 1 : 8;
1541 
1542  for (c = 0; c < s->nb_components; c++) {
1543  uint8_t *data = s->picture_ptr->data[c];
1544  int linesize = s->linesize[c];
1545  int h = s->h_max / s->h_count[c];
1546  int v = s->v_max / s->v_count[c];
1547  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1548  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1549 
1550  if (~s->coefs_finished[c])
1551  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1552 
1553  if (s->interlaced && s->bottom_field)
1554  data += linesize >> 1;
1555 
1556  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1557  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1558  int block_idx = mb_y * s->block_stride[c];
1559  int16_t (*block)[64] = &s->blocks[c][block_idx];
1560  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1561  s->idsp.idct_put(ptr, linesize, *block);
1562  if (s->bits & 7)
1563  shift_output(s, ptr, linesize);
1564  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1565  }
1566  }
1567  }
1568 }
1569 
1570 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1571  int mb_bitmask_size, const AVFrame *reference)
1572 {
1573  int len, nb_components, i, h, v, predictor, point_transform;
1574  int index, id, ret;
1575  const int block_size = s->lossless ? 1 : 8;
1576  int ilv, prev_shift;
1577 
1578  if (!s->got_picture) {
1579  av_log(s->avctx, AV_LOG_WARNING,
1580  "Can not process SOS before SOF, skipping\n");
1581  return -1;
1582  }
1583 
1584  if (reference) {
1585  if (reference->width != s->picture_ptr->width ||
1586  reference->height != s->picture_ptr->height ||
1587  reference->format != s->picture_ptr->format) {
1588  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1589  return AVERROR_INVALIDDATA;
1590  }
1591  }
1592 
1593  /* XXX: verify len field validity */
1594  len = get_bits(&s->gb, 16);
1595  nb_components = get_bits(&s->gb, 8);
1596  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1598  "decode_sos: nb_components (%d)",
1599  nb_components);
1600  return AVERROR_PATCHWELCOME;
1601  }
1602  if (len != 6 + 2 * nb_components) {
1603  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1604  return AVERROR_INVALIDDATA;
1605  }
1606  for (i = 0; i < nb_components; i++) {
1607  id = get_bits(&s->gb, 8) - 1;
1608  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1609  /* find component index */
1610  for (index = 0; index < s->nb_components; index++)
1611  if (id == s->component_id[index])
1612  break;
1613  if (index == s->nb_components) {
1614  av_log(s->avctx, AV_LOG_ERROR,
1615  "decode_sos: index(%d) out of components\n", index);
1616  return AVERROR_INVALIDDATA;
1617  }
1618  /* Metasoft MJPEG codec has Cb and Cr swapped */
1619  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1620  && nb_components == 3 && s->nb_components == 3 && i)
1621  index = 3 - i;
1622 
1623  s->quant_sindex[i] = s->quant_index[index];
1624  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1625  s->h_scount[i] = s->h_count[index];
1626  s->v_scount[i] = s->v_count[index];
1627 
1628  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1629  index = (index+2)%3;
1630 
1631  s->comp_index[i] = index;
1632 
1633  s->dc_index[i] = get_bits(&s->gb, 4);
1634  s->ac_index[i] = get_bits(&s->gb, 4);
1635 
1636  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1637  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1638  goto out_of_range;
1639  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1640  goto out_of_range;
1641  }
1642 
1643  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1644  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1645  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1646  prev_shift = get_bits(&s->gb, 4); /* Ah */
1647  point_transform = get_bits(&s->gb, 4); /* Al */
1648  }else
1649  prev_shift = point_transform = 0;
1650 
1651  if (nb_components > 1) {
1652  /* interleaved stream */
1653  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1654  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1655  } else if (!s->ls) { /* skip this for JPEG-LS */
1656  h = s->h_max / s->h_scount[0];
1657  v = s->v_max / s->v_scount[0];
1658  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1659  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1660  s->nb_blocks[0] = 1;
1661  s->h_scount[0] = 1;
1662  s->v_scount[0] = 1;
1663  }
1664 
1665  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1666  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1667  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1668  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1669  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1670 
1671 
1672  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1673  for (i = s->mjpb_skiptosod; i > 0; i--)
1674  skip_bits(&s->gb, 8);
1675 
1676 next_field:
1677  for (i = 0; i < nb_components; i++)
1678  s->last_dc[i] = (4 << s->bits);
1679 
1680  if (s->avctx->hwaccel) {
1681  int bytes_to_start = get_bits_count(&s->gb) / 8;
1682  av_assert0(bytes_to_start >= 0 &&
1683  s->raw_scan_buffer_size >= bytes_to_start);
1684 
1685  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1686  s->raw_scan_buffer + bytes_to_start,
1687  s->raw_scan_buffer_size - bytes_to_start);
1688  if (ret < 0)
1689  return ret;
1690 
1691  } else if (s->lossless) {
1692  av_assert0(s->picture_ptr == s->picture);
1693  if (CONFIG_JPEGLS_DECODER && s->ls) {
1694 // for () {
1695 // reset_ls_coding_parameters(s, 0);
1696 
1697  if ((ret = ff_jpegls_decode_picture(s, predictor,
1698  point_transform, ilv)) < 0)
1699  return ret;
1700  } else {
1701  if (s->rgb) {
1702  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1703  return ret;
1704  } else {
1705  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1706  point_transform,
1707  nb_components)) < 0)
1708  return ret;
1709  }
1710  }
1711  } else {
1712  if (s->progressive && predictor) {
1713  av_assert0(s->picture_ptr == s->picture);
1714  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1715  ilv, prev_shift,
1716  point_transform)) < 0)
1717  return ret;
1718  } else {
1719  if ((ret = mjpeg_decode_scan(s, nb_components,
1720  prev_shift, point_transform,
1721  mb_bitmask, mb_bitmask_size, reference)) < 0)
1722  return ret;
1723  }
1724  }
1725 
1726  if (s->interlaced &&
1727  get_bits_left(&s->gb) > 32 &&
1728  show_bits(&s->gb, 8) == 0xFF) {
1729  GetBitContext bak = s->gb;
1730  align_get_bits(&bak);
1731  if (show_bits(&bak, 16) == 0xFFD1) {
1732  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1733  s->gb = bak;
1734  skip_bits(&s->gb, 16);
1735  s->bottom_field ^= 1;
1736 
1737  goto next_field;
1738  }
1739  }
1740 
1741  emms_c();
1742  return 0;
1743  out_of_range:
1744  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1745  return AVERROR_INVALIDDATA;
1746 }
1747 
1748 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1749 {
1750  if (get_bits(&s->gb, 16) != 4)
1751  return AVERROR_INVALIDDATA;
1752  s->restart_interval = get_bits(&s->gb, 16);
1753  s->restart_count = 0;
1754  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1755  s->restart_interval);
1756 
1757  return 0;
1758 }
1759 
1760 static int mjpeg_decode_app(MJpegDecodeContext *s)
1761 {
1762  int len, id, i;
1763 
1764  len = get_bits(&s->gb, 16);
1765  if (len < 6)
1766  return AVERROR_INVALIDDATA;
1767  if (8 * len > get_bits_left(&s->gb))
1768  return AVERROR_INVALIDDATA;
1769 
1770  id = get_bits_long(&s->gb, 32);
1771  len -= 6;
1772 
1773  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1774  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1775  av_fourcc2str(av_bswap32(id)), id, len);
1776 
1777  /* Buggy AVID, it puts EOI only at every 10th frame. */
1778  /* Also, this fourcc is used by non-avid files too, it holds some
1779  information, but it's always present in AVID-created files. */
1780  if (id == AV_RB32("AVI1")) {
1781  /* structure:
1782  4bytes AVI1
1783  1bytes polarity
1784  1bytes always zero
1785  4bytes field_size
1786  4bytes field_size_less_padding
1787  */
1788  s->buggy_avid = 1;
1789  i = get_bits(&s->gb, 8); len--;
1790  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1791  goto out;
1792  }
1793 
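/* JFIF APP0: version, sample aspect ratio and an optional thumbnail whose
 * data is skipped. */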
1794  if (id == AV_RB32("JFIF")) {
1795  int t_w, t_h, v1, v2;
1796  if (len < 8)
1797  goto out;
1798  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1799  v1 = get_bits(&s->gb, 8);
1800  v2 = get_bits(&s->gb, 8);
1801  skip_bits(&s->gb, 8);
1802 
1803  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1804  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1805  if ( s->avctx->sample_aspect_ratio.num <= 0
1806  || s->avctx->sample_aspect_ratio.den <= 0) {
1807  s->avctx->sample_aspect_ratio.num = 0;
1808  s->avctx->sample_aspect_ratio.den = 1;
1809  }
1810 
1811  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1812  av_log(s->avctx, AV_LOG_INFO,
1813  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1814  v1, v2,
1815  s->avctx->sample_aspect_ratio.num,
1816  s->avctx->sample_aspect_ratio.den);
1817 
1818  len -= 8;
1819  if (len >= 2) {
1820  t_w = get_bits(&s->gb, 8);
1821  t_h = get_bits(&s->gb, 8);
1822  if (t_w && t_h) {
1823  /* skip thumbnail */
1824  if (len -10 - (t_w * t_h * 3) > 0)
1825  len -= t_w * t_h * 3;
1826  }
1827  len -= 2;
1828  }
1829  goto out;
1830  }
1831 
1832  if ( id == AV_RB32("Adob")
1833  && len >= 7
1834  && show_bits(&s->gb, 8) == 'e'
1835  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1836  skip_bits(&s->gb, 8); /* 'e' */
1837  skip_bits(&s->gb, 16); /* version */
1838  skip_bits(&s->gb, 16); /* flags0 */
1839  skip_bits(&s->gb, 16); /* flags1 */
1840  s->adobe_transform = get_bits(&s->gb, 8);
1841  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1842  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1843  len -= 7;
1844  goto out;
1845  }
1846 
1847  if (id == AV_RB32("LJIF")) {
1848  int rgb = s->rgb;
1849  int pegasus_rct = s->pegasus_rct;
1850  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1851  av_log(s->avctx, AV_LOG_INFO,
1852  "Pegasus lossless jpeg header found\n");
1853  skip_bits(&s->gb, 16); /* version ? */
1854  skip_bits(&s->gb, 16); /* unknown always 0? */
1855  skip_bits(&s->gb, 16); /* unknown always 0? */
1856  skip_bits(&s->gb, 16); /* unknown always 0? */
1857  switch (i=get_bits(&s->gb, 8)) {
1858  case 1:
1859  rgb = 1;
1860  pegasus_rct = 0;
1861  break;
1862  case 2:
1863  rgb = 1;
1864  pegasus_rct = 1;
1865  break;
1866  default:
1867  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1868  }
1869 
1870  len -= 9;
1871  if (s->got_picture)
1872  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1873  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1874  goto out;
1875  }
1876 
1877  s->rgb = rgb;
1878  s->pegasus_rct = pegasus_rct;
1879 
1880  goto out;
1881  }
1882  if (id == AV_RL32("colr") && len > 0) {
1883  s->colr = get_bits(&s->gb, 8);
1884  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1885  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1886  len --;
1887  goto out;
1888  }
1889  if (id == AV_RL32("xfrm") && len > 0) {
1890  s->xfrm = get_bits(&s->gb, 8);
1891  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1892  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1893  len --;
1894  goto out;
1895  }
1896 
1897  /* JPS extension by VRex */
1898  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1899  int flags, layout, type;
1900  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1901  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1902 
1903  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1904  skip_bits(&s->gb, 16); len -= 2; /* block length */
1905  skip_bits(&s->gb, 8); /* reserved */
1906  flags = get_bits(&s->gb, 8);
1907  layout = get_bits(&s->gb, 8);
1908  type = get_bits(&s->gb, 8);
1909  len -= 4;
1910 
1911  av_freep(&s->stereo3d);
1912  s->stereo3d = av_stereo3d_alloc();
1913  if (!s->stereo3d) {
1914  goto out;
1915  }
1916  if (type == 0) {
1917  s->stereo3d->type = AV_STEREO3D_2D;
1918  } else if (type == 1) {
1919  switch (layout) {
1920  case 0x01:
1921  s->stereo3d->type = AV_STEREO3D_LINES;
1922  break;
1923  case 0x02:
1924  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1925  break;
1926  case 0x03:
1927  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1928  break;
1929  }
1930  if (!(flags & 0x04)) {
1931  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1932  }
1933  }
1934  goto out;
1935  }
1936 
1937  /* EXIF metadata */
1938  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
1939  GetByteContext gbytes;
1940  int ret, le, ifd_offset, bytes_read;
1941  const uint8_t *aligned;
1942 
1943  skip_bits(&s->gb, 16); // skip padding
1944  len -= 2;
1945 
1946  // init byte wise reading
1947  aligned = align_get_bits(&s->gb);
1948  bytestream2_init(&gbytes, aligned, len);
1949 
1950  // read TIFF header
1951  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
1952  if (ret) {
1953  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
1954  } else {
1955  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
1956 
1957  // read 0th IFD and store the metadata
1958  // (return values > 0 indicate the presence of subimage metadata)
1959  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
1960  if (ret < 0) {
1961  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
1962  }
1963  }
1964 
1965  bytes_read = bytestream2_tell(&gbytes);
1966  skip_bits(&s->gb, bytes_read << 3);
1967  len -= bytes_read;
1968 
1969  goto out;
1970  }
1971 
1972  /* Apple MJPEG-A */
1973  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
1974  id = get_bits_long(&s->gb, 32);
1975  len -= 4;
1976  /* Apple MJPEG-A */
1977  if (id == AV_RB32("mjpg")) {
1978  /* structure:
1979  4bytes field size
1980  4bytes pad field size
1981  4bytes next off
1982  4bytes quant off
1983  4bytes huff off
1984  4bytes image off
1985  4bytes scan off
1986  4bytes data off
1987  */
1988  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1989  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
1990  }
1991  }
1992 
1993  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
1994  int id2;
1995  unsigned seqno;
1996  unsigned nummarkers;
1997 
1998  id = get_bits_long(&s->gb, 32);
1999  id2 = get_bits_long(&s->gb, 24);
2000  len -= 7;
2001  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2002  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2003  goto out;
2004  }
2005 
2006  skip_bits(&s->gb, 8);
2007  seqno = get_bits(&s->gb, 8);
2008  len -= 2;
2009  if (seqno == 0) {
2010  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2011  goto out;
2012  }
2013 
2014  nummarkers = get_bits(&s->gb, 8);
2015  len -= 1;
2016  if (nummarkers == 0) {
2017  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2018  goto out;
2019  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2020  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2021  goto out;
2022  } else if (seqno > nummarkers) {
2023  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2024  goto out;
2025  }
2026 
2027  /* Allocate if this is the first APP2 we've seen. */
2028  if (s->iccnum == 0) {
2029  s->iccdata = av_mallocz(nummarkers * sizeof(*(s->iccdata)));
2030  s->iccdatalens = av_mallocz(nummarkers * sizeof(*(s->iccdatalens)));
2031  if (!s->iccdata || !s->iccdatalens) {
2032  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2033  return AVERROR(ENOMEM);
2034  }
2035  s->iccnum = nummarkers;
2036  }
2037 
2038  if (s->iccdata[seqno - 1]) {
2039  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2040  goto out;
2041  }
2042 
2043  s->iccdatalens[seqno - 1] = len;
2044  s->iccdata[seqno - 1] = av_malloc(len);
2045  if (!s->iccdata[seqno - 1]) {
2046  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2047  return AVERROR(ENOMEM);
2048  }
2049 
2050  memcpy(s->iccdata[seqno - 1], align_get_bits(&s->gb), len);
2051  skip_bits(&s->gb, len << 3);
2052  len = 0;
2053  s->iccread++;
2054 
2055  if (s->iccread > s->iccnum)
2056  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2057  }
2058 
2059 out:
2060  /* slow but needed for extreme adobe jpegs */
2061  if (len < 0)
2062  av_log(s->avctx, AV_LOG_ERROR,
2063  "mjpeg: error, decode_app parser read over the end\n");
2064  while (--len > 0)
2065  skip_bits(&s->gb, 8);
2066 
2067  return 0;
2068 }
2069 
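Every APPn segment handled by mjpeg_decode_app() above shares the same framing: a two-byte big-endian length (which includes itself) followed by a payload that usually begins with an identifying tag such as "AVI1", "JFIF", "Adob", "LJIF", "_JPS", "Exif" or "ICC_". A minimal sketch, with hypothetical names and assuming a well-formed stream, that walks a raw JPEG buffer up to SOS and lists the APPn tags it finds:

/* Hypothetical helper: walk the marker segments of a JPEG buffer up to SOS
 * and print the first four payload bytes of every APPn segment.  Assumes a
 * well-formed stream; real code (like mjpeg_decode_app above) must validate
 * every length against the remaining input. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void list_app_segments(const uint8_t *buf, size_t size)
{
    size_t i = 2;                                   /* skip SOI (FF D8) */
    while (i + 4 <= size && buf[i] == 0xFF) {
        uint8_t  marker = buf[i + 1];
        unsigned len    = (buf[i + 2] << 8) | buf[i + 3]; /* includes itself */
        if (marker == 0xDA || len < 2)              /* SOS: entropy data next */
            break;
        if (marker >= 0xE0 && marker <= 0xEF && len >= 6 && i + 8 <= size)
            printf("APP%d tag=%.4s len=%u\n", marker - 0xE0,
                   (const char *)&buf[i + 4], len);
        i += 2 + len;                               /* jump to next marker */
    }
}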
2070 static int mjpeg_decode_com(MJpegDecodeContext *s)
2071 {
2072  int len = get_bits(&s->gb, 16);
2073  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2074  int i;
2075  char *cbuf = av_malloc(len - 1);
2076  if (!cbuf)
2077  return AVERROR(ENOMEM);
2078 
2079  for (i = 0; i < len - 2; i++)
2080  cbuf[i] = get_bits(&s->gb, 8);
2081  if (i > 0 && cbuf[i - 1] == '\n')
2082  cbuf[i - 1] = 0;
2083  else
2084  cbuf[i] = 0;
2085 
2086  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2087  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2088 
2089  /* buggy avid, it puts EOI only at every 10th frame */
2090  if (!strncmp(cbuf, "AVID", 4)) {
2091  parse_avid(s, cbuf, len);
2092  } else if (!strcmp(cbuf, "CS=ITU601"))
2093  s->cs_itu601 = 1;
2094  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2095  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2096  s->flipped = 1;
2097  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2098  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2099  s->multiscope = 2;
2100  }
2101 
2102  av_free(cbuf);
2103  }
2104 
2105  return 0;
2106 }
2107 
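The COM segment read by mjpeg_decode_com() above is simply a two-byte length followed by free-form text, which the decoder then matches against known vendor strings. A minimal sketch, with a hypothetical helper name, that extracts that text from a raw COM payload as a NUL-terminated string:

/* Hypothetical helper: copy a COM payload (length-prefixed text) into a
 * caller-provided buffer, mirroring the extraction step of mjpeg_decode_com
 * above without the vendor-string matching. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int copy_com_text(const uint8_t *seg, size_t size,
                         char *out, size_t out_size)
{
    unsigned len;
    if (size < 2 || out_size == 0)
        return -1;
    len = (seg[0] << 8) | seg[1];       /* includes the two length bytes */
    if (len < 2 || len > size)
        return -1;
    len -= 2;
    if (len >= out_size)
        len = (unsigned)out_size - 1;   /* truncate to the caller's buffer */
    memcpy(out, seg + 2, len);
    out[len] = '\0';
    return (int)len;
}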
2108 /* return the 8 bit start code value and update the search
2109  state. Return -1 if no start code found */
2110 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2111 {
2112  const uint8_t *buf_ptr;
2113  unsigned int v, v2;
2114  int val;
2115  int skipped = 0;
2116 
2117  buf_ptr = *pbuf_ptr;
2118  while (buf_end - buf_ptr > 1) {
2119  v = *buf_ptr++;
2120  v2 = *buf_ptr;
2121  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2122  val = *buf_ptr++;
2123  goto found;
2124  }
2125  skipped++;
2126  }
2127  buf_ptr = buf_end;
2128  val = -1;
2129 found:
2130  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2131  *pbuf_ptr = buf_ptr;
2132  return val;
2133 }
2134 
2135 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2136  const uint8_t **buf_ptr, const uint8_t *buf_end,
2137  const uint8_t **unescaped_buf_ptr,
2138  int *unescaped_buf_size)
2139 {
2140  int start_code;
2141  start_code = find_marker(buf_ptr, buf_end);
2142 
2143  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2144  if (!s->buffer)
2145  return AVERROR(ENOMEM);
2146 
2147  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2148  if (start_code == SOS && !s->ls) {
2149  const uint8_t *src = *buf_ptr;
2150  const uint8_t *ptr = src;
2151  uint8_t *dst = s->buffer;
2152 
2153  #define copy_data_segment(skip) do { \
2154  ptrdiff_t length = (ptr - src) - (skip); \
2155  if (length > 0) { \
2156  memcpy(dst, src, length); \
2157  dst += length; \
2158  src = ptr; \
2159  } \
2160  } while (0)
2161 
2162  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2163  ptr = buf_end;
2164  copy_data_segment(0);
2165  } else {
2166  while (ptr < buf_end) {
2167  uint8_t x = *(ptr++);
2168 
2169  if (x == 0xff) {
2170  ptrdiff_t skip = 0;
2171  while (ptr < buf_end && x == 0xff) {
2172  x = *(ptr++);
2173  skip++;
2174  }
2175 
2176  /* 0xFF, 0xFF, ... */
2177  if (skip > 1) {
2178  copy_data_segment(skip);
2179 
2180  /* decrement src as it is equal to ptr after the
2181  * copy_data_segment macro and we might want to
2182  * copy the current value of x later on */
2183  src--;
2184  }
2185 
2186  if (x < RST0 || x > RST7) {
2187  copy_data_segment(1);
2188  if (x)
2189  break;
2190  }
2191  }
2192  }
2193  if (src < ptr)
2194  copy_data_segment(0);
2195  }
2196  #undef copy_data_segment
2197 
2198  *unescaped_buf_ptr = s->buffer;
2199  *unescaped_buf_size = dst - s->buffer;
2200  memset(s->buffer + *unescaped_buf_size, 0,
2201  s->buffer_size - *unescaped_buf_size);
2202 
2203  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2204  (buf_end - *buf_ptr) - (dst - s->buffer));
2205  } else if (start_code == SOS && s->ls) {
2206  const uint8_t *src = *buf_ptr;
2207  uint8_t *dst = s->buffer;
2208  int bit_count = 0;
2209  int t = 0, b = 0;
2210  PutBitContext pb;
2211 
2212  /* find marker */
2213  while (src + t < buf_end) {
2214  uint8_t x = src[t++];
2215  if (x == 0xff) {
2216  while ((src + t < buf_end) && x == 0xff)
2217  x = src[t++];
2218  if (x & 0x80) {
2219  t -= FFMIN(2, t);
2220  break;
2221  }
2222  }
2223  }
2224  bit_count = t * 8;
2225  init_put_bits(&pb, dst, t);
2226 
2227  /* unescape bitstream */
2228  while (b < t) {
2229  uint8_t x = src[b++];
2230  put_bits(&pb, 8, x);
2231  if (x == 0xFF && b < t) {
2232  x = src[b++];
2233  if (x & 0x80) {
2234  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2235  x &= 0x7f;
2236  }
2237  put_bits(&pb, 7, x);
2238  bit_count--;
2239  }
2240  }
2241  flush_put_bits(&pb);
2242 
2243  *unescaped_buf_ptr = dst;
2244  *unescaped_buf_size = (bit_count + 7) >> 3;
2245  memset(s->buffer + *unescaped_buf_size, 0,
2246  s->buffer_size - *unescaped_buf_size);
2247  } else {
2248  *unescaped_buf_ptr = *buf_ptr;
2249  *unescaped_buf_size = buf_end - *buf_ptr;
2250  }
2251 
2252  return start_code;
2253 }
2254 
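The unescaping loop in ff_mjpeg_find_marker() above undoes JPEG byte stuffing: inside entropy-coded data every literal 0xFF is followed by a stuffed 0x00 that must be dropped, restart markers 0xFFD0..0xFFD7 pass through, and any other marker ends the scan. A minimal sketch of that rule alone, with a hypothetical helper name; the in-tree code above additionally collapses runs of 0xFF fill bytes:

/* Hypothetical helper: copy entropy-coded data, dropping the 0x00 stuffed
 * after each literal 0xFF, keeping restart markers (FF D0..D7), and stopping
 * at any other marker.  dst is assumed to hold at least size bytes. */
#include <stddef.h>
#include <stdint.h>

static size_t unstuff_scan(const uint8_t *src, size_t size, uint8_t *dst)
{
    size_t i = 0, o = 0;
    while (i < size) {
        uint8_t x = src[i++];
        if (x == 0xFF && i < size) {
            uint8_t next = src[i];
            if (next == 0x00) {
                dst[o++] = 0xFF;                    /* stuffed byte: keep FF */
                i++;
                continue;
            }
            if (next >= 0xD0 && next <= 0xD7) {     /* restart marker: keep */
                dst[o++] = 0xFF;
                dst[o++] = src[i++];
                continue;
            }
            break;                                  /* real marker: stop */
        }
        dst[o++] = x;
    }
    return o;
}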
2255 static void reset_icc_profile(MJpegDecodeContext *s)
2256 {
2257  int i;
2258 
2259  if (s->iccdata)
2260  for (i = 0; i < s->iccnum; i++)
2261  av_freep(&s->iccdata[i]);
2262  av_freep(&s->iccdata);
2263  av_freep(&s->iccdatalens);
2264 
2265  s->iccread = 0;
2266  s->iccnum = 0;
2267 }
2268 
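The ICC handling split between mjpeg_decode_app() and ff_mjpeg_decode_frame() follows the standard embedding convention: each APP2 payload starts with "ICC_PROFILE\0", a 1-based chunk sequence number and the total chunk count, and the chunks are reassembled once all of them have been seen. A sketch of that 14-byte chunk header, using hypothetical types and names and operating on the raw APP2 payload rather than the bit reader:

/* Hypothetical helper: parse the fixed 14-byte header of an ICC chunk
 * carried in an APP2 payload ("ICC_PROFILE" + NUL + seqno + count). */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct IccChunk {
    unsigned       seqno;   /* 1-based index of this chunk        */
    unsigned       count;   /* total number of chunks             */
    const uint8_t *data;    /* profile bytes carried by the chunk */
    size_t         size;
} IccChunk;

static int parse_icc_chunk(const uint8_t *app2, size_t size, IccChunk *c)
{
    static const char tag[] = "ICC_PROFILE";    /* 11 chars + NUL = 12 bytes */
    if (size < 14 || memcmp(app2, tag, 12))
        return -1;
    c->seqno = app2[12];
    c->count = app2[13];
    if (c->seqno == 0 || c->count == 0 || c->seqno > c->count)
        return -1;
    c->data = app2 + 14;
    c->size = size - 14;
    return 0;
}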
2269 int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2270  AVPacket *avpkt)
2271 {
2272  AVFrame *frame = data;
2273  const uint8_t *buf = avpkt->data;
2274  int buf_size = avpkt->size;
2275  MJpegDecodeContext *s = avctx->priv_data;
2276  const uint8_t *buf_end, *buf_ptr;
2277  const uint8_t *unescaped_buf_ptr;
2278  int hshift, vshift;
2279  int unescaped_buf_size;
2280  int start_code;
2281  int i, index;
2282  int ret = 0;
2283  int is16bit;
2284 
2285  s->buf_size = buf_size;
2286 
2287  av_dict_free(&s->exif_metadata);
2288  av_freep(&s->stereo3d);
2289  s->adobe_transform = -1;
2290 
2291  if (s->iccnum != 0)
2292  reset_icc_profile(s);
2293 
2294  buf_ptr = buf;
2295  buf_end = buf + buf_size;
2296  while (buf_ptr < buf_end) {
2297  /* find start of next marker */
2298  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2299  &unescaped_buf_ptr,
2300  &unescaped_buf_size);
2301  /* EOF */
2302  if (start_code < 0) {
2303  break;
2304  } else if (unescaped_buf_size > INT_MAX / 8) {
2305  av_log(avctx, AV_LOG_ERROR,
2306  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2307  start_code, unescaped_buf_size, buf_size);
2308  return AVERROR_INVALIDDATA;
2309  }
2310  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2311  start_code, buf_end - buf_ptr);
2312 
2313  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2314 
2315  if (ret < 0) {
2316  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2317  goto fail;
2318  }
2319 
2320  s->start_code = start_code;
2321  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2322  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2323 
2324  /* process markers */
2325  if (start_code >= RST0 && start_code <= RST7) {
2326  av_log(avctx, AV_LOG_DEBUG,
2327  "restart marker: %d\n", start_code & 0x0f);
2328  /* APP fields */
2329  } else if (start_code >= APP0 && start_code <= APP15) {
2330  if ((ret = mjpeg_decode_app(s)) < 0)
2331  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2332  av_err2str(ret));
2333  /* Comment */
2334  } else if (start_code == COM) {
2335  ret = mjpeg_decode_com(s);
2336  if (ret < 0)
2337  return ret;
2338  } else if (start_code == DQT) {
2339  ret = ff_mjpeg_decode_dqt(s);
2340  if (ret < 0)
2341  return ret;
2342  }
2343 
2344  ret = -1;
2345 
2346  if (!CONFIG_JPEGLS_DECODER &&
2347  (start_code == SOF48 || start_code == LSE)) {
2348  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2349  return AVERROR(ENOSYS);
2350  }
2351 
2352  if (avctx->skip_frame == AVDISCARD_ALL) {
2353  switch(start_code) {
2354  case SOF0:
2355  case SOF1:
2356  case SOF2:
2357  case SOF3:
2358  case SOF48:
2359  case SOI:
2360  case SOS:
2361  case EOI:
2362  break;
2363  default:
2364  goto skip;
2365  }
2366  }
2367 
2368  switch (start_code) {
2369  case SOI:
2370  s->restart_interval = 0;
2371  s->restart_count = 0;
2372  s->raw_image_buffer = buf_ptr;
2373  s->raw_image_buffer_size = buf_end - buf_ptr;
2374  /* nothing to do on SOI */
2375  break;
2376  case DHT:
2377  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2378  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2379  goto fail;
2380  }
2381  break;
2382  case SOF0:
2383  case SOF1:
2384  if (start_code == SOF0)
2385  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2386  else
2387  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2388  s->lossless = 0;
2389  s->ls = 0;
2390  s->progressive = 0;
2391  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2392  goto fail;
2393  break;
2394  case SOF2:
2395  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2396  s->lossless = 0;
2397  s->ls = 0;
2398  s->progressive = 1;
2399  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2400  goto fail;
2401  break;
2402  case SOF3:
2403  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2404  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2405  s->lossless = 1;
2406  s->ls = 0;
2407  s->progressive = 0;
2408  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2409  goto fail;
2410  break;
2411  case SOF48:
2412  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2413  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2414  s->lossless = 1;
2415  s->ls = 1;
2416  s->progressive = 0;
2417  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2418  goto fail;
2419  break;
2420  case LSE:
2421  if (!CONFIG_JPEGLS_DECODER ||
2422  (ret = ff_jpegls_decode_lse(s)) < 0)
2423  goto fail;
2424  break;
2425  case EOI:
2426 eoi_parser:
2427  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2428  s->progressive && s->cur_scan && s->got_picture)
2429  mjpeg_idct_scan_progressive_ac(s);
2430  s->cur_scan = 0;
2431  if (!s->got_picture) {
2432  av_log(avctx, AV_LOG_WARNING,
2433  "Found EOI before any SOF, ignoring\n");
2434  break;
2435  }
2436  if (s->interlaced) {
2437  s->bottom_field ^= 1;
2438  /* if not bottom field, do not output image yet */
2439  if (s->bottom_field == !s->interlace_polarity)
2440  break;
2441  }
2442  if (avctx->skip_frame == AVDISCARD_ALL) {
2443  s->got_picture = 0;
2444  goto the_end_no_picture;
2445  }
2446  if (s->avctx->hwaccel) {
2447  ret = s->avctx->hwaccel->end_frame(s->avctx);
2448  if (ret < 0)
2449  return ret;
2450 
2451  av_freep(&s->hwaccel_picture_private);
2452  }
2453  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2454  return ret;
2455  *got_frame = 1;
2456  s->got_picture = 0;
2457 
2458  if (!s->lossless) {
2459  int qp = FFMAX3(s->qscale[0],
2460  s->qscale[1],
2461  s->qscale[2]);
2462  int qpw = (s->width + 15) / 16;
2463  AVBufferRef *qp_table_buf = av_buffer_alloc(qpw);
2464  if (qp_table_buf) {
2465  memset(qp_table_buf->data, qp, qpw);
2466  av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1);
2467  }
2468 
2469  if(avctx->debug & FF_DEBUG_QP)
2470  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2471  }
2472 
2473  goto the_end;
2474  case SOS:
2475  s->raw_scan_buffer = buf_ptr;
2476  s->raw_scan_buffer_size = buf_end - buf_ptr;
2477 
2478  s->cur_scan++;
2479  if (avctx->skip_frame == AVDISCARD_ALL) {
2480  skip_bits(&s->gb, get_bits_left(&s->gb));
2481  break;
2482  }
2483 
2484  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2485  (avctx->err_recognition & AV_EF_EXPLODE))
2486  goto fail;
2487  break;
2488  case DRI:
2489  if ((ret = mjpeg_decode_dri(s)) < 0)
2490  return ret;
2491  break;
2492  case SOF5:
2493  case SOF6:
2494  case SOF7:
2495  case SOF9:
2496  case SOF10:
2497  case SOF11:
2498  case SOF13:
2499  case SOF14:
2500  case SOF15:
2501  case JPG:
2502  av_log(avctx, AV_LOG_ERROR,
2503  "mjpeg: unsupported coding type (%x)\n", start_code);
2504  break;
2505  }
2506 
2507 skip:
2508  /* eof process start code */
2509  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2510  av_log(avctx, AV_LOG_DEBUG,
2511  "marker parser used %d bytes (%d bits)\n",
2512  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2513  }
2514  if (s->got_picture && s->cur_scan) {
2515  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2516  goto eoi_parser;
2517  }
2518  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2519  return AVERROR_INVALIDDATA;
2520 fail:
2521  s->got_picture = 0;
2522  return ret;
2523 the_end:
2524 
2525  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2526 
2527  if (AV_RB32(s->upscale_h)) {
2528  int p;
2529  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2530  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2531  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2532  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2533  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2534  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2535  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2536  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2537  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2538  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2539  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2540  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2541  );
2542  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2543  if (ret)
2544  return ret;
2545 
2546  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2547  for (p = 0; p<s->nb_components; p++) {
2548  uint8_t *line = s->picture_ptr->data[p];
2549  int w = s->width;
2550  int h = s->height;
2551  if (!s->upscale_h[p])
2552  continue;
2553  if (p==1 || p==2) {
2554  w = AV_CEIL_RSHIFT(w, hshift);
2555  h = AV_CEIL_RSHIFT(h, vshift);
2556  }
2557  if (s->upscale_v[p] == 1)
2558  h = (h+1)>>1;
2559  av_assert0(w > 0);
2560  for (i = 0; i < h; i++) {
2561  if (s->upscale_h[p] == 1) {
2562  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2563  else line[w - 1] = line[(w - 1) / 2];
2564  for (index = w - 2; index > 0; index--) {
2565  if (is16bit)
2566  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2567  else
2568  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2569  }
2570  } else if (s->upscale_h[p] == 2) {
2571  if (is16bit) {
2572  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2573  if (w > 1)
2574  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2575  } else {
2576  line[w - 1] = line[(w - 1) / 3];
2577  if (w > 1)
2578  line[w - 2] = line[w - 1];
2579  }
2580  for (index = w - 3; index > 0; index--) {
2581  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2582  }
2583  }
2584  line += s->linesize[p];
2585  }
2586  }
2587  }
2588  if (AV_RB32(s->upscale_v)) {
2589  int p;
2590  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2591  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2592  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2593  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2594  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2595  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2596  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2597  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2598  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2599  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2600  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2601  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2602  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2603  );
2604  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2605  if (ret)
2606  return ret;
2607 
2608  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2609  for (p = 0; p < s->nb_components; p++) {
2610  uint8_t *dst;
2611  int w = s->width;
2612  int h = s->height;
2613  if (!s->upscale_v[p])
2614  continue;
2615  if (p==1 || p==2) {
2616  w = AV_CEIL_RSHIFT(w, hshift);
2617  h = AV_CEIL_RSHIFT(h, vshift);
2618  }
2619  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2620  for (i = h - 1; i; i--) {
2621  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2622  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2623  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2624  memcpy(dst, src1, w);
2625  } else {
2626  for (index = 0; index < w; index++)
2627  dst[index] = (src1[index] + src2[index]) >> 1;
2628  }
2629  dst -= s->linesize[p];
2630  }
2631  }
2632  }
2633  if (s->flipped && !s->rgb) {
2634  int j;
2635  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2636  if (ret)
2637  return ret;
2638 
2639  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2640  for (index=0; index<s->nb_components; index++) {
2641  uint8_t *dst = s->picture_ptr->data[index];
2642  int w = s->picture_ptr->width;
2643  int h = s->picture_ptr->height;
2644  if(index && index<3){
2645  w = AV_CEIL_RSHIFT(w, hshift);
2646  h = AV_CEIL_RSHIFT(h, vshift);
2647  }
2648  if(dst){
2649  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2650  for (i=0; i<h/2; i++) {
2651  for (j=0; j<w; j++)
2652  FFSWAP(int, dst[j], dst2[j]);
2653  dst += s->picture_ptr->linesize[index];
2654  dst2 -= s->picture_ptr->linesize[index];
2655  }
2656  }
2657  }
2658  }
2659  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2660  int w = s->picture_ptr->width;
2661  int h = s->picture_ptr->height;
2662  av_assert0(s->nb_components == 4);
2663  for (i=0; i<h; i++) {
2664  int j;
2665  uint8_t *dst[4];
2666  for (index=0; index<4; index++) {
2667  dst[index] = s->picture_ptr->data[index]
2668  + s->picture_ptr->linesize[index]*i;
2669  }
2670  for (j=0; j<w; j++) {
2671  int k = dst[3][j];
2672  int r = dst[0][j] * k;
2673  int g = dst[1][j] * k;
2674  int b = dst[2][j] * k;
2675  dst[0][j] = g*257 >> 16;
2676  dst[1][j] = b*257 >> 16;
2677  dst[2][j] = r*257 >> 16;
2678  dst[3][j] = 255;
2679  }
2680  }
2681  }
2682  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2683  int w = s->picture_ptr->width;
2684  int h = s->picture_ptr->height;
2685  av_assert0(s->nb_components == 4);
2686  for (i=0; i<h; i++) {
2687  int j;
2688  uint8_t *dst[4];
2689  for (index=0; index<4; index++) {
2690  dst[index] = s->picture_ptr->data[index]
2691  + s->picture_ptr->linesize[index]*i;
2692  }
2693  for (j=0; j<w; j++) {
2694  int k = dst[3][j];
2695  int r = (255 - dst[0][j]) * k;
2696  int g = (128 - dst[1][j]) * k;
2697  int b = (128 - dst[2][j]) * k;
2698  dst[0][j] = r*257 >> 16;
2699  dst[1][j] = (g*257 >> 16) + 128;
2700  dst[2][j] = (b*257 >> 16) + 128;
2701  dst[3][j] = 255;
2702  }
2703  }
2704  }
2705 
2706  if (s->stereo3d) {
2707  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2708  if (stereo) {
2709  stereo->type = s->stereo3d->type;
2710  stereo->flags = s->stereo3d->flags;
2711  }
2712  av_freep(&s->stereo3d);
2713  }
2714 
2715  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2716  AVFrameSideData *sd;
2717  size_t offset = 0;
2718  int total_size = 0;
2719  int i;
2720 
2721  /* Sum size of all parts. */
2722  for (i = 0; i < s->iccnum; i++)
2723  total_size += s->iccdatalens[i];
2724 
2725  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2726  if (!sd) {
2727  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2728  return AVERROR(ENOMEM);
2729  }
2730 
2731  /* Reassemble the parts, which are now in-order. */
2732  for (i = 0; i < s->iccnum; i++) {
2733  memcpy(sd->data + offset, s->iccdata[i], s->iccdatalens[i]);
2734  offset += s->iccdatalens[i];
2735  }
2736  }
2737 
2738  av_dict_copy(&((AVFrame *) data)->metadata, s->exif_metadata, 0);
2739  av_dict_free(&s->exif_metadata);
2740 
2741 the_end_no_picture:
2742  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2743  buf_end - buf_ptr);
2744 // return buf_end - buf_ptr;
2745  return buf_ptr - buf;
2746 }
2747 
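The alpha-weighted loops near the end of ff_mjpeg_decode_frame() rely on the identity that, for 0 <= v <= 255*255, (v * 257) >> 16 approximates v / 255 using only a multiply and a shift: it never overshoots, and it is low by at most one (only when v is an exact multiple of 255). A small self-contained check of that claim:

/* Brute-force verification of the (v * 257) >> 16 approximation of v / 255
 * over the range produced by an 8-bit sample times an 8-bit alpha. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (int v = 0; v <= 255 * 255; v++) {
        int exact  = v / 255;
        int approx = (v * 257) >> 16;
        assert(approx <= exact && exact - approx <= 1);
        assert(exact == approx || v % 255 == 0);
    }
    printf("approximation holds on [0, 255*255]\n");
    return 0;
}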
2748 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2749 {
2750  MJpegDecodeContext *s = avctx->priv_data;
2751  int i, j;
2752 
2753  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2754  av_log(avctx, AV_LOG_INFO, "Single field\n");
2755  }
2756 
2757  if (s->picture) {
2758  av_frame_free(&s->picture);
2759  s->picture_ptr = NULL;
2760  } else if (s->picture_ptr)
2761  av_frame_unref(s->picture_ptr);
2762 
2763  av_freep(&s->buffer);
2764  av_freep(&s->stereo3d);
2765  av_freep(&s->ljpeg_buffer);
2766  s->ljpeg_buffer_size = 0;
2767 
2768  for (i = 0; i < 3; i++) {
2769  for (j = 0; j < 4; j++)
2770  ff_free_vlc(&s->vlcs[i][j]);
2771  }
2772  for (i = 0; i < MAX_COMPONENTS; i++) {
2773  av_freep(&s->blocks[i]);
2774  av_freep(&s->last_nnz[i]);
2775  }
2776  av_dict_free(&s->exif_metadata);
2777 
2778  reset_icc_profile(s);
2779 
2780  av_freep(&s->hwaccel_picture_private);
2781 
2782  return 0;
2783 }
2784 
2785 static void decode_flush(AVCodecContext *avctx)
2786 {
2787  MJpegDecodeContext *s = avctx->priv_data;
2788  s->got_picture = 0;
2789 }
2790 
2791 #if CONFIG_MJPEG_DECODER
2792 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2793 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2794 static const AVOption options[] = {
2795  { "extern_huff", "Use external huffman table.",
2796  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2797  { NULL },
2798 };
2799 
2800 static const AVClass mjpegdec_class = {
2801  .class_name = "MJPEG decoder",
2802  .item_name = av_default_item_name,
2803  .option = options,
2804  .version = LIBAVUTIL_VERSION_INT,
2805 };
2806 
2808  .name = "mjpeg",
2809  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2810  .type = AVMEDIA_TYPE_VIDEO,
2811  .id = AV_CODEC_ID_MJPEG,
2812  .priv_data_size = sizeof(MJpegDecodeContext),
2813  .init = ff_mjpeg_decode_init,
2814  .close = ff_mjpeg_decode_end,
2815  .decode = ff_mjpeg_decode_frame,
2816  .flush = decode_flush,
2817  .capabilities = AV_CODEC_CAP_DR1,
2818  .max_lowres = 3,
2819  .priv_class = &mjpegdec_class,
2820  .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2821  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2822  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2823  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2824 #if CONFIG_MJPEG_NVDEC_HWACCEL
2825  HWACCEL_NVDEC(mjpeg),
2826 #endif
2827 #if CONFIG_MJPEG_VAAPI_HWACCEL
2828  HWACCEL_VAAPI(mjpeg),
2829 #endif
2830  NULL
2831  },
2832 };
2833 #endif
2834 #if CONFIG_THP_DECODER
2836  .name = "thp",
2837  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2838  .type = AVMEDIA_TYPE_VIDEO,
2839  .id = AV_CODEC_ID_THP,
2840  .priv_data_size = sizeof(MJpegDecodeContext),
2841  .init = ff_mjpeg_decode_init,
2842  .close = ff_mjpeg_decode_end,
2843  .decode = ff_mjpeg_decode_frame,
2844  .flush = decode_flush,
2845  .capabilities = AV_CODEC_CAP_DR1,
2846  .max_lowres = 3,
2847  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2848 };
2849 #endif
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwaccel.h:71
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2729
AVCodec
AVCodec.
Definition: avcodec.h:3481
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:207
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
av_buffer_alloc
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
ff_mjpeg_build_huffman_codes
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
r
const char * r
Definition: vf_curves.c:114
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2193
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1371
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1328
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
n
int n
Definition: avisynth_c.h:760
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2785
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:927
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:366
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2694
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
avpriv_mjpeg_bits_ac_luminance
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:263
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:130
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:173
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
avpriv_mjpeg_val_ac_luminance
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
AVFrame::width
int width
Definition: frame.h:353
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:2999
internal.h
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2222
AVOption
AVOption.
Definition: opt.h:246
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:761
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:91
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
build_vlc
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
Definition: mjpegdec.c:53
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: avcodec.h:318
avpriv_mjpeg_bits_dc_luminance
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2651
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2562
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:502
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1174
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1344
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:1574
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:139
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3040
fail
#define fail()
Definition: checkasm.h:120
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:2995
GetBitContext
Definition: get_bits.h:61
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2070
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:76
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1645
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: avcodec.h:1544
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:1753
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
src
#define src
Definition: vp8dsp.c:254
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
av_bswap32
#define av_bswap32
Definition: bswap.h:33
avpriv_mjpeg_bits_dc_chrominance
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:258
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:164
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:122
aligned
static int aligned(int val)
Definition: dashdec.c:165
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:828
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:3228
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:2997
mask
static const uint16_t mask[17]
Definition: lzw.c:38
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1010
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
SOF5
@ SOF5
Definition: mjpeg.h:44
hwaccel.h
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:1667
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:119
avpriv_mjpeg_val_dc
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:2998
g
const char * g
Definition: vf_curves.c:115
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:348
bits
uint8_t bits
Definition: vp3data.h:202
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:169
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
RST0
@ RST0
Definition: mjpeg.h:61
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2255
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2748
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:35
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:811
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
ff_thp_decoder
AVCodec ff_thp_decoder
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:377
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:263
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:378
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1535
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:206
VD
#define VD
Definition: cuviddec.c:1127
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:200
SOF13
@ SOF13
Definition: mjpeg.h:52
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MJpegDecodeContext
Definition: mjpegdec.h:46
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1359
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:945
lowres
static int lowres
Definition: ffplay.c:335
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1480
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2705
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
avpriv_mjpeg_val_ac_chrominance
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:523
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1965
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1044
AVPacket::size
int size
Definition: avcodec.h:1478
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... ..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:329
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
FF_QSCALE_TYPE_MPEG1
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:81
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:846
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:119
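Because the macro expands to a temporary buffer, the idiomatic use is inline in a call such as av_log(); a small sketch (avctx and codec are hypothetical here):
int ret = avcodec_open2(avctx, codec, NULL);
if (ret < 0)
    av_log(avctx, AV_LOG_ERROR, "cannot open codec: %s\n", av_err2str(ret));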
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1570
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
size
int size
Definition: twinvq_data.h:11134
AV_RB32
#define AV_RB32(x)
Definition: bytestream.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:203
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:368
AVCodecHWConfigInternal
Definition: hwaccel.h:29
val
const char const char void * val
Definition: avisynth_c.h:863
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
offset
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
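As an illustration of the mapping this reader implements (a standalone sketch, not the internal get_bits.h code): an n-bit code whose top bit is set is the positive value itself; otherwise the value is negative, offset by 2^n - 1.
/* hypothetical helper mirroring the MPEG-1 dc-style sign+mantissa mapping */
static int xbits_value(unsigned code, int n)
{
    if (code & (1u << (n - 1)))
        return (int)code;               /* e.g. n=3, code=0b110 -> +6 */
    return (int)code - ((1 << n) - 1);  /* e.g. n=3, code=0b001 -> -6 */
}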
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2110
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVCodec::id
enum AVCodecID id
Definition: avcodec.h:3495
layout
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: avcodec.h:225
src1
#define src1
Definition: h264pred.c:139
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2269
interlaced
uint8_t interlaced
Definition: mxfenc.c:2217
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:779
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
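A peek-then-consume sketch (get_bits.h is an internal header, so this assumes the FFmpeg tree; buf and buf_size are hypothetical):
GetBitContext gb;
int ret = init_get_bits8(&gb, buf, buf_size);
if (ret < 0)
    return ret;
if (show_bits(&gb, 8) == 0xFF)   /* peek only, nothing is consumed */
    skip_bits(&gb, 8);           /* decide, then consume for real */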
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:2996
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1748
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE bytes at the end, which will always be 0.
Definition: utils.c:70
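A sketch of the usual reuse pattern (internal libavcodec helper, FFmpeg tree assumed; pkt is a hypothetical AVPacket and the pointer/size pair would normally live in the decoder context):
uint8_t *buf      = NULL;
unsigned  buf_size = 0;

av_fast_padded_malloc(&buf, &buf_size, pkt->size);
if (!buf)
    return AVERROR(ENOMEM);
/* buf now provides pkt->size usable bytes plus AV_INPUT_BUFFER_PADDING_SIZE
 * zeroed bytes at the end; the allocation is only redone when it must grow. */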
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:43
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2664
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:236
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2207
len
int len
Definition: vorbis_enc_data.h:452
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:521
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:919
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:2655
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:790
left
Definition: snow.txt:386
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_RL32
#define AV_RL32(x)
Definition: bytestream.h:88
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2135
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: avcodec.h:325
OFFSET
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
AVFrame::height
int height
Definition: frame.h:353
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwaccel.h:73
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
buffer
the frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:544
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
VLC
Definition: vlc.h:26
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the first IFD in *ifd_offset accordingly.
Definition: tiff_common.c:261
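A sketch of parsing the TIFF header that precedes EXIF IFD data (internal helpers, FFmpeg tree assumed; exif_buf and exif_size are hypothetical):
GetByteContext gb;
int le, ifd_offset;

bytestream2_init(&gb, exif_buf, exif_size);
if (ff_tdecode_header(&gb, &le, &ifd_offset) < 0)
    return AVERROR_INVALIDDATA;
/* le is nonzero for little-endian TIFF data; ifd_offset is the position of
 * the first IFD relative to the start of the TIFF header. */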
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:426
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:2650
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:305
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:81
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
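A minimal call-site sketch (ff_set_dimensions() is internal to libavcodec, so the FFmpeg tree is assumed; width and height are hypothetical):
int ret = ff_set_dimensions(avctx, width, height);
if (ret < 0)
    return ret;   /* invalid or oversized dimensions are rejected */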
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1760
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2256
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
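A sketch of attaching stereoscopic metadata to a decoded frame, roughly what a decoder does when the bitstream signals a packed layout (libavutil/stereo3d.h; frame is a hypothetical AVFrame pointer):
AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
if (!stereo)
    return AVERROR(ENOMEM);
stereo->type  = AV_STEREO3D_TOPBOTTOM;     /* views stacked vertically */
stereo->flags = AV_STEREO3D_FLAG_INVERT;   /* bottom view carries the left eye */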
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:201
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
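A small bit-writing sketch (put_bits.h is internal, FFmpeg tree assumed); flush_put_bits() must be called before the buffer is read back so the final partial byte is zero-padded:
PutBitContext pb;
uint8_t out[64];

init_put_bits(&pb, out, sizeof(out));
put_bits(&pb, 4, 0x0);       /* a 4-bit field */
put_bits(&pb, 16, 0x00C4);   /* a 16-bit field */
flush_put_bits(&pb);         /* zero-pad up to the next byte boundary */
/* put_bits_count(&pb) / 8 bytes of out are now valid */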
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1590
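A small sketch of building and printing such a tag with the usual helpers (MKTAG() packs the first character into the least significant byte):
unsigned tag = MKTAG('M', 'J', 'P', 'G');
av_log(NULL, AV_LOG_INFO, "codec_tag %s (0x%08x)\n", av_fourcc2str(tag), tag);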
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
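A sketch of the copy/free pairing, e.g. to hand metadata to another owner without giving up the source dictionary (libavutil/dict.h; src is assumed to be an existing AVDictionary pointer):
AVDictionary *dst = NULL;
int ret = av_dict_copy(&dst, src, 0);   /* 0 = default flags, duplicate all entries */
if (ret < 0)
    return ret;
/* ... use dst ... */
av_dict_free(&dst);                     /* frees all keys/values and sets dst to NULL */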
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
AV_FIELD_BB
@ AV_FIELD_BB
Definition: avcodec.h:1547
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
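A bounds-checked reading sketch (bytestream.h is internal to the FFmpeg tree; buf and buf_size are hypothetical); reads beyond the end return 0 instead of overrunning the buffer:
GetByteContext gb;

bytestream2_init(&gb, buf, buf_size);
while (bytestream2_get_bytes_left(&gb) >= 4) {
    unsigned tag = bytestream2_get_be16(&gb);   /* big-endian 16-bit read */
    unsigned len = bytestream2_get_be16(&gb);
    av_log(NULL, AV_LOG_INFO, "tag 0x%04x, %u bytes\n", tag, len);
    bytestream2_skip(&gb, len);                 /* clamped to the remaining bytes */
}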
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
avpriv_mjpeg_bits_ac_chrominance
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
ff_mjpeg_decoder
AVCodec ff_mjpeg_decoder
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:282
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
AV_RB24
#define AV_RB24(x)
Definition: bytestream.h:93
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
av_frame_set_qp_table
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:54
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:82