mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "hwconfig.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "jpegtables.h"
43 #include "mjpeg.h"
44 #include "mjpegdec.h"
45 #include "jpeglsdec.h"
46 #include "profiles.h"
47 #include "put_bits.h"
48 #include "tiff.h"
49 #include "exif.h"
50 #include "bytestream.h"
51 
52 
53 static int build_vlc(VLC *vlc, const uint8_t *bits_table,
54  const uint8_t *val_table, int nb_codes,
55  int use_static, int is_ac)
56 {
57  uint8_t huff_size[256] = { 0 };
58  uint16_t huff_code[256];
59  uint16_t huff_sym[256];
60  int i;
61 
62  av_assert0(nb_codes <= 256);
63 
64  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
65 
66  for (i = 0; i < 256; i++)
67  huff_sym[i] = i + 16 * is_ac;
68 
69  if (is_ac)
70  huff_sym[0] = 16 * 256;
71 
72  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
73  huff_code, 2, 2, huff_sym, 2, 2, use_static);
74 }
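/* Editorial note: build_vlc() biases the symbol values so decode_block() can
 * consume AC run/size pairs directly. For AC tables every symbol is offset by
 * 16, which folds the implicit "advance to the next coefficient" step into
 * the run nibble (i += code >> 4 then lands on the coefficient itself), and
 * the EOB symbol is remapped to 16 * 256 so the accumulated index jumps past
 * 63 and the block loop terminates cleanly. */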
75 
76 static int init_default_huffman_tables(MJpegDecodeContext *s)
77 {
78  static const struct {
79  int class;
80  int index;
81  const uint8_t *bits;
82  const uint8_t *values;
83  int codes;
84  int length;
85  } ht[] = {
86  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
87  avpriv_mjpeg_val_dc, 12, 12 },
88  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
89  avpriv_mjpeg_val_dc, 12, 12 },
90  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
91  avpriv_mjpeg_val_ac_luminance, 251, 162 },
92  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
93  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
94  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
95  avpriv_mjpeg_val_ac_luminance, 251, 162 },
96  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
97  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
98  };
99  int i, ret;
100 
101  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
102  ret = build_vlc(&s->vlcs[ht[i].class][ht[i].index],
103  ht[i].bits, ht[i].values, ht[i].codes,
104  0, ht[i].class == 1);
105  if (ret < 0)
106  return ret;
107 
108  if (ht[i].class < 2) {
109  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
110  ht[i].bits + 1, 16);
111  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
112  ht[i].values, ht[i].length);
113  }
114  }
115 
116  return 0;
117 }
118 
119 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
120 {
121  s->buggy_avid = 1;
122  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
123  s->interlace_polarity = 1;
124  if (len > 14 && buf[12] == 2) /* 2 - PAL */
125  s->interlace_polarity = 0;
126  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
127  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
128 }
129 
130 static void init_idct(AVCodecContext *avctx)
131 {
132  MJpegDecodeContext *s = avctx->priv_data;
133 
134  ff_idctdsp_init(&s->idsp, avctx);
135  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
136  ff_zigzag_direct);
137 }
138 
139 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
140 {
141  MJpegDecodeContext *s = avctx->priv_data;
142  int ret;
143 
144  if (!s->picture_ptr) {
145  s->picture = av_frame_alloc();
146  if (!s->picture)
147  return AVERROR(ENOMEM);
148  s->picture_ptr = s->picture;
149  }
150 
151  s->avctx = avctx;
152  ff_blockdsp_init(&s->bdsp, avctx);
153  ff_hpeldsp_init(&s->hdsp, avctx->flags);
154  init_idct(avctx);
155  s->buffer_size = 0;
156  s->buffer = NULL;
157  s->start_code = -1;
158  s->first_picture = 1;
159  s->got_picture = 0;
160  s->org_height = avctx->coded_height;
161  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
162  avctx->colorspace = AVCOL_SPC_BT470BG;
163  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
164 
165  if ((ret = init_default_huffman_tables(s)) < 0)
166  return ret;
167 
168  if (s->extern_huff) {
169  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
170  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
171  return ret;
172  if (ff_mjpeg_decode_dht(s)) {
173  av_log(avctx, AV_LOG_ERROR,
174  "error using external huffman table, switching back to internal\n");
175  init_default_huffman_tables(s);
176  }
177  }
178  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
179  s->interlace_polarity = 1; /* bottom field first */
180  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
181  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
182  if (avctx->codec_tag == AV_RL32("MJPG"))
183  s->interlace_polarity = 1;
184  }
185 
186  if ( avctx->extradata_size > 8
187  && AV_RL32(avctx->extradata) == 0x2C
188  && AV_RL32(avctx->extradata+4) == 0x18) {
189  parse_avid(s, avctx->extradata, avctx->extradata_size);
190  }
191 
192  if (avctx->codec->id == AV_CODEC_ID_AMV)
193  s->flipped = 1;
194 
195  return 0;
196 }
197 
198 
199 /* quantize tables */
200 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
201 {
202  int len, index, i;
203 
204  len = get_bits(&s->gb, 16) - 2;
205 
206  if (8*len > get_bits_left(&s->gb)) {
207  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
208  return AVERROR_INVALIDDATA;
209  }
210 
211  while (len >= 65) {
212  int pr = get_bits(&s->gb, 4);
213  if (pr > 1) {
214  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
215  return AVERROR_INVALIDDATA;
216  }
217  index = get_bits(&s->gb, 4);
218  if (index >= 4)
219  return -1;
220  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
221  /* read quant table */
222  for (i = 0; i < 64; i++) {
223  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
224  if (s->quant_matrixes[index][i] == 0) {
225  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
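/* Editorial note: the qscale[] value computed above is only a heuristic: it
 * takes the larger of the first horizontal and first vertical AC quantizer
 * (matrix entries 1 and 8 in raster order) and halves it, giving later code a
 * rough measure of how coarsely the table quantizes. */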
239 
240 /* decode huffman tables and build VLC decoders */
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v, code_max;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  code_max = 0;
274  for (i = 0; i < n; i++) {
275  v = get_bits(&s->gb, 8);
276  if (v > code_max)
277  code_max = v;
278  val_table[i] = v;
279  }
280  len -= n;
281 
282  /* build VLC and flush previous vlc if present */
283  ff_free_vlc(&s->vlcs[class][index]);
284  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
285  class, index, code_max + 1);
286  if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
287  code_max + 1, 0, class > 0)) < 0)
288  return ret;
289 
290  if (class > 0) {
291  ff_free_vlc(&s->vlcs[2][index]);
292  if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
293  code_max + 1, 0, 0)) < 0)
294  return ret;
295  }
296 
297  for (i = 0; i < 16; i++)
298  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
299  for (i = 0; i < 256; i++)
300  s->raw_huffman_values[class][index][i] = val_table[i];
301  }
302  return 0;
303 }
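/* Editorial note: for each AC table (class 1) a duplicate VLC is also built
 * in class 2 with is_ac = 0, i.e. without the +16 symbol bias: the
 * progressive path (decode_block_progressive/decode_block_refinement) reads
 * raw run/size symbols from s->vlcs[2], while the sequential decoder uses the
 * biased tables in s->vlcs[1]. */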
304 
305 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
306 {
307  int len, nb_components, i, width, height, bits, ret, size_change;
308  unsigned pix_fmt_id;
309  int h_count[MAX_COMPONENTS] = { 0 };
310  int v_count[MAX_COMPONENTS] = { 0 };
311 
312  s->cur_scan = 0;
313  memset(s->upscale_h, 0, sizeof(s->upscale_h));
314  memset(s->upscale_v, 0, sizeof(s->upscale_v));
315 
316  len = get_bits(&s->gb, 16);
317  bits = get_bits(&s->gb, 8);
318 
319  if (bits > 16 || bits < 1) {
320  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
321  return AVERROR_INVALIDDATA;
322  }
323 
324  if (s->avctx->bits_per_raw_sample != bits) {
325  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
326  s->avctx->bits_per_raw_sample = bits;
327  init_idct(s->avctx);
328  }
329  if (s->pegasus_rct)
330  bits = 9;
331  if (bits == 9 && !s->pegasus_rct)
332  s->rct = 1; // FIXME ugly
333 
334  if(s->lossless && s->avctx->lowres){
335  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
336  return -1;
337  }
338 
339  height = get_bits(&s->gb, 16);
340  width = get_bits(&s->gb, 16);
341 
342  // HACK for odd_height.mov
343  if (s->interlaced && s->width == width && s->height == height + 1)
344  height= s->height;
345 
346  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
347  if (av_image_check_size(width, height, 0, s->avctx) < 0)
348  return AVERROR_INVALIDDATA;
349  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
350  return AVERROR_INVALIDDATA;
351 
352  nb_components = get_bits(&s->gb, 8);
353  if (nb_components <= 0 ||
354  nb_components > MAX_COMPONENTS)
355  return -1;
356  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
357  if (nb_components != s->nb_components) {
358  av_log(s->avctx, AV_LOG_ERROR,
359  "nb_components changing in interlaced picture\n");
360  return AVERROR_INVALIDDATA;
361  }
362  }
363  if (s->ls && !(bits <= 8 || nb_components == 1)) {
364  avpriv_report_missing_feature(s->avctx,
365  "JPEG-LS that is not <= 8 "
366  "bits/component or 16-bit gray");
367  return AVERROR_PATCHWELCOME;
368  }
369  if (len != 8 + 3 * nb_components) {
370  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
371  return AVERROR_INVALIDDATA;
372  }
373 
374  s->nb_components = nb_components;
375  s->h_max = 1;
376  s->v_max = 1;
377  for (i = 0; i < nb_components; i++) {
378  /* component id */
379  s->component_id[i] = get_bits(&s->gb, 8) - 1;
380  h_count[i] = get_bits(&s->gb, 4);
381  v_count[i] = get_bits(&s->gb, 4);
382  /* compute hmax and vmax (only used in interleaved case) */
383  if (h_count[i] > s->h_max)
384  s->h_max = h_count[i];
385  if (v_count[i] > s->v_max)
386  s->v_max = v_count[i];
387  s->quant_index[i] = get_bits(&s->gb, 8);
388  if (s->quant_index[i] >= 4) {
389  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
390  return AVERROR_INVALIDDATA;
391  }
392  if (!h_count[i] || !v_count[i]) {
393  av_log(s->avctx, AV_LOG_ERROR,
394  "Invalid sampling factor in component %d %d:%d\n",
395  i, h_count[i], v_count[i]);
396  return AVERROR_INVALIDDATA;
397  }
398 
399  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
400  i, h_count[i], v_count[i],
401  s->component_id[i], s->quant_index[i]);
402  }
403  if ( nb_components == 4
404  && s->component_id[0] == 'C' - 1
405  && s->component_id[1] == 'M' - 1
406  && s->component_id[2] == 'Y' - 1
407  && s->component_id[3] == 'K' - 1)
408  s->adobe_transform = 0;
409 
410  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
411  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
412  return AVERROR_PATCHWELCOME;
413  }
414 
415  if (s->bayer) {
416  if (nb_components == 2) {
417  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
418  width stored in their SOF3 markers is the width of each one. We only output
419  a single component, therefore we need to adjust the output image width. We
420  handle the deinterleaving (but not the debayering) in this file. */
421  width *= 2;
422  }
423  /* They can also contain 1 component, which is double the width and half the height
424  of the final image (rows are interleaved). We don't handle the decoding in this
425  file, but leave that to the TIFF/DNG decoder. */
426  }
427 
428  /* if different size, realloc/alloc picture */
429  if (width != s->width || height != s->height || bits != s->bits ||
430  memcmp(s->h_count, h_count, sizeof(h_count)) ||
431  memcmp(s->v_count, v_count, sizeof(v_count))) {
432  size_change = 1;
433 
434  s->width = width;
435  s->height = height;
436  s->bits = bits;
437  memcpy(s->h_count, h_count, sizeof(h_count));
438  memcpy(s->v_count, v_count, sizeof(v_count));
439  s->interlaced = 0;
440  s->got_picture = 0;
441 
442  /* test interlaced mode */
443  if (s->first_picture &&
444  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
445  s->org_height != 0 &&
446  s->height < ((s->org_height * 3) / 4)) {
447  s->interlaced = 1;
448  s->bottom_field = s->interlace_polarity;
449  s->picture_ptr->interlaced_frame = 1;
450  s->picture_ptr->top_field_first = !s->interlace_polarity;
451  height *= 2;
452  }
453 
454  ret = ff_set_dimensions(s->avctx, width, height);
455  if (ret < 0)
456  return ret;
457 
458  s->first_picture = 0;
459  } else {
460  size_change = 0;
461  }
462 
463  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
464  if (s->progressive) {
465  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
466  return AVERROR_INVALIDDATA;
467  }
468  } else {
469  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
470  s->rgb = 1;
471  else if (!s->lossless)
472  s->rgb = 0;
473  /* XXX: not complete test ! */
474  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
475  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
476  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
477  (s->h_count[3] << 4) | s->v_count[3];
478  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
479  /* NOTE we do not allocate pictures large enough for the possible
480  * padding of h/v_count being 4 */
481  if (!(pix_fmt_id & 0xD0D0D0D0))
482  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
483  if (!(pix_fmt_id & 0x0D0D0D0D))
484  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
485 
486  for (i = 0; i < 8; i++) {
487  int j = 6 + (i&1) - (i&6);
488  int is = (pix_fmt_id >> (4*i)) & 0xF;
489  int js = (pix_fmt_id >> (4*j)) & 0xF;
490 
491  if (is == 1 && js != 2 && (i < 2 || i > 5))
492  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
493  if (is == 1 && js != 2 && (i < 2 || i > 5))
494  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
495 
496  if (is == 1 && js == 2) {
497  if (i & 1) s->upscale_h[j/2] = 1;
498  else s->upscale_v[j/2] = 1;
499  }
500  }
501 
502  if (s->bayer) {
503  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
504  goto unk_pixfmt;
505  }
506 
507  switch (pix_fmt_id) {
508  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
509  if (!s->bayer)
510  goto unk_pixfmt;
511  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
512  break;
513  case 0x11111100:
514  if (s->rgb)
516  else {
517  if ( s->adobe_transform == 0
518  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
520  } else {
524  }
525  }
526  av_assert0(s->nb_components == 3);
527  break;
528  case 0x11111111:
529  if (s->rgb)
531  else {
532  if (s->adobe_transform == 0 && s->bits <= 8) {
534  } else {
537  }
538  }
539  av_assert0(s->nb_components == 4);
540  break;
541  case 0x22111122:
542  case 0x22111111:
543  if (s->adobe_transform == 0 && s->bits <= 8) {
545  s->upscale_v[1] = s->upscale_v[2] = 1;
546  s->upscale_h[1] = s->upscale_h[2] = 1;
547  } else if (s->adobe_transform == 2 && s->bits <= 8) {
549  s->upscale_v[1] = s->upscale_v[2] = 1;
550  s->upscale_h[1] = s->upscale_h[2] = 1;
552  } else {
553  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
556  }
557  av_assert0(s->nb_components == 4);
558  break;
559  case 0x12121100:
560  case 0x22122100:
561  case 0x21211100:
562  case 0x22211200:
563  case 0x22221100:
564  case 0x22112200:
565  case 0x11222200:
567  else
568  goto unk_pixfmt;
570  break;
571  case 0x11000000:
572  case 0x13000000:
573  case 0x14000000:
574  case 0x31000000:
575  case 0x33000000:
576  case 0x34000000:
577  case 0x41000000:
578  case 0x43000000:
579  case 0x44000000:
580  if(s->bits <= 8)
582  else
584  break;
585  case 0x12111100:
586  case 0x14121200:
587  case 0x14111100:
588  case 0x22211100:
589  case 0x22112100:
590  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
591  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
592  else
593  goto unk_pixfmt;
594  s->upscale_v[0] = s->upscale_v[1] = 1;
595  } else {
596  if (pix_fmt_id == 0x14111100)
597  s->upscale_v[1] = s->upscale_v[2] = 1;
599  else
600  goto unk_pixfmt;
602  }
603  break;
604  case 0x21111100:
605  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
606  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
607  else
608  goto unk_pixfmt;
609  s->upscale_h[0] = s->upscale_h[1] = 1;
610  } else {
614  }
615  break;
616  case 0x31111100:
617  if (s->bits > 8)
618  goto unk_pixfmt;
621  s->upscale_h[1] = s->upscale_h[2] = 2;
622  break;
623  case 0x22121100:
624  case 0x22111200:
626  else
627  goto unk_pixfmt;
629  break;
630  case 0x22111100:
631  case 0x23111100:
632  case 0x42111100:
633  case 0x24111100:
637  if (pix_fmt_id == 0x42111100) {
638  if (s->bits > 8)
639  goto unk_pixfmt;
640  s->upscale_h[1] = s->upscale_h[2] = 1;
641  } else if (pix_fmt_id == 0x24111100) {
642  if (s->bits > 8)
643  goto unk_pixfmt;
644  s->upscale_v[1] = s->upscale_v[2] = 1;
645  } else if (pix_fmt_id == 0x23111100) {
646  if (s->bits > 8)
647  goto unk_pixfmt;
648  s->upscale_v[1] = s->upscale_v[2] = 2;
649  }
650  break;
651  case 0x41111100:
653  else
654  goto unk_pixfmt;
656  break;
657  default:
658  unk_pixfmt:
659  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
660  memset(s->upscale_h, 0, sizeof(s->upscale_h));
661  memset(s->upscale_v, 0, sizeof(s->upscale_v));
662  return AVERROR_PATCHWELCOME;
663  }
664  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
665  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
666  return AVERROR_PATCHWELCOME;
667  }
668  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
669  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
670  return AVERROR_PATCHWELCOME;
671  }
672  if (s->ls) {
673  memset(s->upscale_h, 0, sizeof(s->upscale_h));
674  memset(s->upscale_v, 0, sizeof(s->upscale_v));
675  if (s->nb_components == 3) {
677  } else if (s->nb_components != 1) {
678  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
679  return AVERROR_PATCHWELCOME;
680  } else if (s->palette_index && s->bits <= 8)
682  else if (s->bits <= 8)
684  else
686  }
687 
688  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
689  if (!s->pix_desc) {
690  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
691  return AVERROR_BUG;
692  }
693 
694  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
695  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
696  } else {
697  enum AVPixelFormat pix_fmts[] = {
698 #if CONFIG_MJPEG_NVDEC_HWACCEL
699  AV_PIX_FMT_CUDA,
700 #endif
701 #if CONFIG_MJPEG_VAAPI_HWACCEL
702  AV_PIX_FMT_VAAPI,
703 #endif
704  s->avctx->pix_fmt,
705  AV_PIX_FMT_NONE,
706  };
707  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
708  if (s->hwaccel_pix_fmt < 0)
709  return AVERROR(EINVAL);
710 
711  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
712  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
713  }
714 
715  if (s->avctx->skip_frame == AVDISCARD_ALL) {
716  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
717  s->picture_ptr->key_frame = 1;
718  s->got_picture = 1;
719  return 0;
720  }
721 
722  av_frame_unref(s->picture_ptr);
723  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
724  return -1;
725  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
726  s->picture_ptr->key_frame = 1;
727  s->got_picture = 1;
728 
729  for (i = 0; i < 4; i++)
730  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
731 
732  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
733  s->width, s->height, s->linesize[0], s->linesize[1],
734  s->interlaced, s->avctx->height);
735 
736  }
737 
738  if ((s->rgb && !s->lossless && !s->ls) ||
739  (!s->rgb && s->ls && s->nb_components > 1) ||
740  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
741  ) {
742  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
743  return AVERROR_PATCHWELCOME;
744  }
745 
746  /* totally blank picture as progressive JPEG will only add details to it */
747  if (s->progressive) {
748  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
749  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
750  for (i = 0; i < s->nb_components; i++) {
751  int size = bw * bh * s->h_count[i] * s->v_count[i];
752  av_freep(&s->blocks[i]);
753  av_freep(&s->last_nnz[i]);
754  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
755  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
756  if (!s->blocks[i] || !s->last_nnz[i])
757  return AVERROR(ENOMEM);
758  s->block_stride[i] = bw * s->h_count[i];
759  }
760  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
761  }
762 
763  if (s->avctx->hwaccel) {
764  s->hwaccel_picture_private =
765  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
766  if (!s->hwaccel_picture_private)
767  return AVERROR(ENOMEM);
768 
769  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
770  s->raw_image_buffer_size);
771  if (ret < 0)
772  return ret;
773  }
774 
775  return 0;
776 }
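/* Editorial note: pix_fmt_id above packs the per-component sampling factors
 * into one 32-bit word, one nibble per h/v count. A worked example (values
 * chosen for illustration): a 3-component 4:2:0 stream with
 * h_count = {2,1,1} and v_count = {2,1,1} yields
 *   (2<<28)|(2<<24)|(1<<20)|(1<<16)|(1<<12)|(1<<8) = 0x22111100,
 * which the switch maps to a planar 4:2:0 YUV output format. */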
777 
778 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
779 {
780  int code;
781  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
782  if (code < 0 || code > 16) {
783  av_log(s->avctx, AV_LOG_ERROR,
784  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
785  0, dc_index, &s->vlcs[0][dc_index]);
786  return 0xfffff;
787  }
788 
789  if (code)
790  return get_xbits(&s->gb, code);
791  else
792  return 0;
793 }
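/* Editorial note: the DC VLC yields a magnitude category (the number of extra
 * bits that follow); get_xbits() then reads those bits as a signed JPEG
 * variable-length integer (for example, category 3 followed by the bits 010
 * decodes to -5). 0xfffff is an in-band error sentinel checked by the
 * callers, since it can never be a valid decoded difference. */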
794 
795 /* decode block and dequantize */
796 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
797  int dc_index, int ac_index, uint16_t *quant_matrix)
798 {
799  int code, i, j, level, val;
800 
801  /* DC coef */
802  val = mjpeg_decode_dc(s, dc_index);
803  if (val == 0xfffff) {
804  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
805  return AVERROR_INVALIDDATA;
806  }
807  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
808  val = av_clip_int16(val);
809  s->last_dc[component] = val;
810  block[0] = val;
811  /* AC coefs */
812  i = 0;
813  {OPEN_READER(re, &s->gb);
814  do {
815  UPDATE_CACHE(re, &s->gb);
816  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
817 
818  i += ((unsigned)code) >> 4;
819  code &= 0xf;
820  if (code) {
821  if (code > MIN_CACHE_BITS - 16)
822  UPDATE_CACHE(re, &s->gb);
823 
824  {
825  int cache = GET_CACHE(re, &s->gb);
826  int sign = (~cache) >> 31;
827  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
828  }
829 
830  LAST_SKIP_BITS(re, &s->gb, code);
831 
832  if (i > 63) {
833  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
834  return AVERROR_INVALIDDATA;
835  }
836  j = s->scantable.permutated[i];
837  block[j] = level * quant_matrix[i];
838  }
839  } while (i < 63);
840  CLOSE_READER(re, &s->gb);}
841 
842  return 0;
843 }
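/* Editorial note: the loop above walks the block in zigzag order using the
 * biased AC symbols from build_vlc(): the high nibble advances the
 * coefficient index (run of zeros plus one), the low nibble is the size of
 * the non-zero level, and the sign is recovered branch-free from the bit
 * cache before the level is dequantized in place. */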
844 
845 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
846  int component, int dc_index,
847  uint16_t *quant_matrix, int Al)
848 {
849  unsigned val;
850  s->bdsp.clear_block(block);
851  val = mjpeg_decode_dc(s, dc_index);
852  if (val == 0xfffff) {
853  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
854  return AVERROR_INVALIDDATA;
855  }
856  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
857  s->last_dc[component] = val;
858  block[0] = val;
859  return 0;
860 }
861 
862 /* decode block and dequantize - progressive JPEG version */
863 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
864  uint8_t *last_nnz, int ac_index,
865  uint16_t *quant_matrix,
866  int ss, int se, int Al, int *EOBRUN)
867 {
868  int code, i, j, val, run;
869  unsigned level;
870 
871  if (*EOBRUN) {
872  (*EOBRUN)--;
873  return 0;
874  }
875 
876  {
877  OPEN_READER(re, &s->gb);
878  for (i = ss; ; i++) {
879  UPDATE_CACHE(re, &s->gb);
880  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
881 
882  run = ((unsigned) code) >> 4;
883  code &= 0xF;
884  if (code) {
885  i += run;
886  if (code > MIN_CACHE_BITS - 16)
887  UPDATE_CACHE(re, &s->gb);
888 
889  {
890  int cache = GET_CACHE(re, &s->gb);
891  int sign = (~cache) >> 31;
892  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
893  }
894 
895  LAST_SKIP_BITS(re, &s->gb, code);
896 
897  if (i >= se) {
898  if (i == se) {
899  j = s->scantable.permutated[se];
900  block[j] = level * (quant_matrix[se] << Al);
901  break;
902  }
903  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
904  return AVERROR_INVALIDDATA;
905  }
906  j = s->scantable.permutated[i];
907  block[j] = level * (quant_matrix[i] << Al);
908  } else {
909  if (run == 0xF) {// ZRL - skip 15 coefficients
910  i += 15;
911  if (i >= se) {
912  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
913  return AVERROR_INVALIDDATA;
914  }
915  } else {
916  val = (1 << run);
917  if (run) {
918  UPDATE_CACHE(re, &s->gb);
919  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
920  LAST_SKIP_BITS(re, &s->gb, run);
921  }
922  *EOBRUN = val - 1;
923  break;
924  }
925  }
926  }
927  CLOSE_READER(re, &s->gb);
928  }
929 
930  if (i > *last_nnz)
931  *last_nnz = i;
932 
933  return 0;
934 }
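/* Editorial note: progressive scans only carry coefficients in the spectral
 * band [ss, se], scaled down by the successive-approximation shift Al. An
 * end-of-band symbol (size == 0, run != 0xF) encodes how many following
 * blocks are entirely zero in this band; that count is kept in *EOBRUN so
 * subsequent calls can return immediately. */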
935 
936 #define REFINE_BIT(j) { \
937  UPDATE_CACHE(re, &s->gb); \
938  sign = block[j] >> 15; \
939  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
940  ((quant_matrix[i] ^ sign) - sign) << Al; \
941  LAST_SKIP_BITS(re, &s->gb, 1); \
942 }
943 
944 #define ZERO_RUN \
945 for (; ; i++) { \
946  if (i > last) { \
947  i += run; \
948  if (i > se) { \
949  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
950  return -1; \
951  } \
952  break; \
953  } \
954  j = s->scantable.permutated[i]; \
955  if (block[j]) \
956  REFINE_BIT(j) \
957  else if (run-- == 0) \
958  break; \
959 }
960 
961 /* decode block and dequantize - progressive JPEG refinement pass */
962 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
963  uint8_t *last_nnz,
964  int ac_index, uint16_t *quant_matrix,
965  int ss, int se, int Al, int *EOBRUN)
966 {
967  int code, i = ss, j, sign, val, run;
968  int last = FFMIN(se, *last_nnz);
969 
970  OPEN_READER(re, &s->gb);
971  if (*EOBRUN) {
972  (*EOBRUN)--;
973  } else {
974  for (; ; i++) {
975  UPDATE_CACHE(re, &s->gb);
976  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
977 
978  if (code & 0xF) {
979  run = ((unsigned) code) >> 4;
980  UPDATE_CACHE(re, &s->gb);
981  val = SHOW_UBITS(re, &s->gb, 1);
982  LAST_SKIP_BITS(re, &s->gb, 1);
983  ZERO_RUN;
984  j = s->scantable.permutated[i];
985  val--;
986  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
987  if (i == se) {
988  if (i > *last_nnz)
989  *last_nnz = i;
990  CLOSE_READER(re, &s->gb);
991  return 0;
992  }
993  } else {
994  run = ((unsigned) code) >> 4;
995  if (run == 0xF) {
996  ZERO_RUN;
997  } else {
998  val = run;
999  run = (1 << run);
1000  if (val) {
1001  UPDATE_CACHE(re, &s->gb);
1002  run += SHOW_UBITS(re, &s->gb, val);
1003  LAST_SKIP_BITS(re, &s->gb, val);
1004  }
1005  *EOBRUN = run - 1;
1006  break;
1007  }
1008  }
1009  }
1010 
1011  if (i > *last_nnz)
1012  *last_nnz = i;
1013  }
1014 
1015  for (; i <= last; i++) {
1016  j = s->scantable.permutated[i];
1017  if (block[j])
1018  REFINE_BIT(j)
1019  }
1020  CLOSE_READER(re, &s->gb);
1021 
1022  return 0;
1023 }
1024 #undef REFINE_BIT
1025 #undef ZERO_RUN
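/* Editorial note: the refinement pass above (successive approximation with
 * Ah != 0) does not create new run/level pairs from scratch; it appends one
 * extra bit of precision to coefficients that are already non-zero
 * (REFINE_BIT) and inserts newly significant coefficients of magnitude one,
 * dequantized as +/-(quant_matrix[i] << Al), at positions given by the
 * zero-run codes. */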
1026 
1027 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1028 {
1029  int i;
1030  int reset = 0;
1031 
1032  if (s->restart_interval) {
1033  s->restart_count--;
1034  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1035  align_get_bits(&s->gb);
1036  for (i = 0; i < nb_components; i++) /* reset dc */
1037  s->last_dc[i] = (4 << s->bits);
1038  }
1039 
1040  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1041  /* skip RSTn */
1042  if (s->restart_count == 0) {
1043  if( show_bits(&s->gb, i) == (1 << i) - 1
1044  || show_bits(&s->gb, i) == 0xFF) {
1045  int pos = get_bits_count(&s->gb);
1046  align_get_bits(&s->gb);
1047  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1048  skip_bits(&s->gb, 8);
1049  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1050  for (i = 0; i < nb_components; i++) /* reset dc */
1051  s->last_dc[i] = (4 << s->bits);
1052  reset = 1;
1053  } else
1054  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1055  }
1056  }
1057  }
1058  return reset;
1059 }
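/* Editorial note: at a restart-interval boundary the bitstream is expected to
 * be byte-aligned and to contain an RSTn marker (0xFFD0-0xFFD7).
 * handle_rstn() resynchronizes on such a marker, resets the DC predictors to
 * their mid-range value (4 << bits), and reports whether a reset happened so
 * the progressive scan can also clear its EOB run. */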
1060 
1061 /* Handles 1 to 4 components */
1062 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1063 {
1064  int i, mb_x, mb_y;
1065  unsigned width;
1066  uint16_t (*buffer)[4];
1067  int left[4], top[4], topleft[4];
1068  const int linesize = s->linesize[0];
1069  const int mask = ((1 << s->bits) - 1) << point_transform;
1070  int resync_mb_y = 0;
1071  int resync_mb_x = 0;
1072  int vpred[6];
1073 
1074  if (!s->bayer && s->nb_components < 3)
1075  return AVERROR_INVALIDDATA;
1076  if (s->bayer && s->nb_components > 2)
1077  return AVERROR_INVALIDDATA;
1078  if (s->nb_components <= 0 || s->nb_components > 4)
1079  return AVERROR_INVALIDDATA;
1080  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1081  return AVERROR_INVALIDDATA;
1082 
1083 
1085 
1086  if (s->restart_interval == 0)
1087  s->restart_interval = INT_MAX;
1088 
1089  if (s->bayer)
1090  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1091  else
1092  width = s->mb_width;
1093 
1094  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1095  if (!s->ljpeg_buffer)
1096  return AVERROR(ENOMEM);
1097 
1098  buffer = s->ljpeg_buffer;
1099 
1100  for (i = 0; i < 4; i++)
1101  buffer[0][i] = 1 << (s->bits - 1);
1102 
1103  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1104  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1105 
1106  if (s->interlaced && s->bottom_field)
1107  ptr += linesize >> 1;
1108 
1109  for (i = 0; i < 4; i++)
1110  top[i] = left[i] = topleft[i] = buffer[0][i];
1111 
1112  if ((mb_y * s->width) % s->restart_interval == 0) {
1113  for (i = 0; i < 6; i++)
1114  vpred[i] = 1 << (s->bits-1);
1115  }
1116 
1117  for (mb_x = 0; mb_x < width; mb_x++) {
1118  int modified_predictor = predictor;
1119 
1120  if (get_bits_left(&s->gb) < 1) {
1121  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1122  return AVERROR_INVALIDDATA;
1123  }
1124 
1125  if (s->restart_interval && !s->restart_count){
1126  s->restart_count = s->restart_interval;
1127  resync_mb_x = mb_x;
1128  resync_mb_y = mb_y;
1129  for(i=0; i<4; i++)
1130  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1131  }
1132  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1133  modified_predictor = 1;
1134 
1135  for (i=0;i<nb_components;i++) {
1136  int pred, dc;
1137 
1138  topleft[i] = top[i];
1139  top[i] = buffer[mb_x][i];
1140 
1141  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1142  if(dc == 0xFFFFF)
1143  return -1;
1144 
1145  if (!s->bayer || mb_x) {
1146  pred = left[i];
1147  } else { /* This path runs only for the first line in bayer images */
1148  vpred[i] += dc;
1149  pred = vpred[i] - dc;
1150  }
1151 
1152  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1153 
1154  left[i] = buffer[mb_x][i] =
1155  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1156  }
1157 
1158  if (s->restart_interval && !--s->restart_count) {
1159  align_get_bits(&s->gb);
1160  skip_bits(&s->gb, 16); /* skip RSTn */
1161  }
1162  }
1163  if (s->rct && s->nb_components == 4) {
1164  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1165  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1166  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1167  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1168  ptr[4*mb_x + 0] = buffer[mb_x][3];
1169  }
1170  } else if (s->nb_components == 4) {
1171  for(i=0; i<nb_components; i++) {
1172  int c= s->comp_index[i];
1173  if (s->bits <= 8) {
1174  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1175  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1176  }
1177  } else if(s->bits == 9) {
1178  return AVERROR_PATCHWELCOME;
1179  } else {
1180  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1181  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1182  }
1183  }
1184  }
1185  } else if (s->rct) {
1186  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1187  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1188  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1189  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1190  }
1191  } else if (s->pegasus_rct) {
1192  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1193  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1194  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1195  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1196  }
1197  } else if (s->bayer) {
1198  if (nb_components == 1) {
1199  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1200  for (mb_x = 0; mb_x < width; mb_x++)
1201  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1202  } else if (nb_components == 2) {
1203  for (mb_x = 0; mb_x < width; mb_x++) {
1204  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1205  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1206  }
1207  }
1208  } else {
1209  for(i=0; i<nb_components; i++) {
1210  int c= s->comp_index[i];
1211  if (s->bits <= 8) {
1212  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1213  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1214  }
1215  } else if(s->bits == 9) {
1216  return AVERROR_PATCHWELCOME;
1217  } else {
1218  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1219  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1220  }
1221  }
1222  }
1223  }
1224  }
1225  return 0;
1226 }
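/* Editorial note: after each line of lossless samples has been predicted and
 * reconstructed into ljpeg_buffer, the code above converts it to the output
 * layout: an inverse RCT (reversible colour transform) for s->rct /
 * s->pegasus_rct streams, plain component reordering otherwise, and raw
 * 16-bit copies for the Bayer case, where the actual debayering is left to
 * the TIFF/DNG decoder. */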
1227 
1228 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1229  int point_transform, int nb_components)
1230 {
1231  int i, mb_x, mb_y, mask;
1232  int bits= (s->bits+7)&~7;
1233  int resync_mb_y = 0;
1234  int resync_mb_x = 0;
1235 
1236  point_transform += bits - s->bits;
1237  mask = ((1 << s->bits) - 1) << point_transform;
1238 
1239  av_assert0(nb_components>=1 && nb_components<=4);
1240 
1241  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1242  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1243  if (get_bits_left(&s->gb) < 1) {
1244  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1245  return AVERROR_INVALIDDATA;
1246  }
1247  if (s->restart_interval && !s->restart_count){
1248  s->restart_count = s->restart_interval;
1249  resync_mb_x = mb_x;
1250  resync_mb_y = mb_y;
1251  }
1252 
1253  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1254  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1255  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1256  for (i = 0; i < nb_components; i++) {
1257  uint8_t *ptr;
1258  uint16_t *ptr16;
1259  int n, h, v, x, y, c, j, linesize;
1260  n = s->nb_blocks[i];
1261  c = s->comp_index[i];
1262  h = s->h_scount[i];
1263  v = s->v_scount[i];
1264  x = 0;
1265  y = 0;
1266  linesize= s->linesize[c];
1267 
1268  if(bits>8) linesize /= 2;
1269 
1270  for(j=0; j<n; j++) {
1271  int pred, dc;
1272 
1273  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1274  if(dc == 0xFFFFF)
1275  return -1;
1276  if ( h * mb_x + x >= s->width
1277  || v * mb_y + y >= s->height) {
1278  // Nothing to do
1279  } else if (bits<=8) {
1280  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1281  if(y==0 && toprow){
1282  if(x==0 && leftcol){
1283  pred= 1 << (bits - 1);
1284  }else{
1285  pred= ptr[-1];
1286  }
1287  }else{
1288  if(x==0 && leftcol){
1289  pred= ptr[-linesize];
1290  }else{
1291  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1292  }
1293  }
1294 
1295  if (s->interlaced && s->bottom_field)
1296  ptr += linesize >> 1;
1297  pred &= mask;
1298  *ptr= pred + ((unsigned)dc << point_transform);
1299  }else{
1300  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1301  if(y==0 && toprow){
1302  if(x==0 && leftcol){
1303  pred= 1 << (bits - 1);
1304  }else{
1305  pred= ptr16[-1];
1306  }
1307  }else{
1308  if(x==0 && leftcol){
1309  pred= ptr16[-linesize];
1310  }else{
1311  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1312  }
1313  }
1314 
1315  if (s->interlaced && s->bottom_field)
1316  ptr16 += linesize >> 1;
1317  pred &= mask;
1318  *ptr16= pred + ((unsigned)dc << point_transform);
1319  }
1320  if (++x == h) {
1321  x = 0;
1322  y++;
1323  }
1324  }
1325  }
1326  } else {
1327  for (i = 0; i < nb_components; i++) {
1328  uint8_t *ptr;
1329  uint16_t *ptr16;
1330  int n, h, v, x, y, c, j, linesize, dc;
1331  n = s->nb_blocks[i];
1332  c = s->comp_index[i];
1333  h = s->h_scount[i];
1334  v = s->v_scount[i];
1335  x = 0;
1336  y = 0;
1337  linesize = s->linesize[c];
1338 
1339  if(bits>8) linesize /= 2;
1340 
1341  for (j = 0; j < n; j++) {
1342  int pred;
1343 
1344  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1345  if(dc == 0xFFFFF)
1346  return -1;
1347  if ( h * mb_x + x >= s->width
1348  || v * mb_y + y >= s->height) {
1349  // Nothing to do
1350  } else if (bits<=8) {
1351  ptr = s->picture_ptr->data[c] +
1352  (linesize * (v * mb_y + y)) +
1353  (h * mb_x + x); //FIXME optimize this crap
1354  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1355 
1356  pred &= mask;
1357  *ptr = pred + ((unsigned)dc << point_transform);
1358  }else{
1359  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1360  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1361 
1362  pred &= mask;
1363  *ptr16= pred + ((unsigned)dc << point_transform);
1364  }
1365 
1366  if (++x == h) {
1367  x = 0;
1368  y++;
1369  }
1370  }
1371  }
1372  }
1373  if (s->restart_interval && !--s->restart_count) {
1374  align_get_bits(&s->gb);
1375  skip_bits(&s->gb, 16); /* skip RSTn */
1376  }
1377  }
1378  }
1379  return 0;
1380 }
1381 
1382 static void mjpeg_copy_block(MJpegDecodeContext *s,
1383  uint8_t *dst, const uint8_t *src,
1384  int linesize, int lowres)
1385 {
1386  switch (lowres) {
1387  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1388  break;
1389  case 1: copy_block4(dst, src, linesize, linesize, 4);
1390  break;
1391  case 2: copy_block2(dst, src, linesize, linesize, 2);
1392  break;
1393  case 3: *dst = *src;
1394  break;
1395  }
1396 }
1397 
1398 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1399 {
1400  int block_x, block_y;
1401  int size = 8 >> s->avctx->lowres;
1402  if (s->bits > 8) {
1403  for (block_y=0; block_y<size; block_y++)
1404  for (block_x=0; block_x<size; block_x++)
1405  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1406  } else {
1407  for (block_y=0; block_y<size; block_y++)
1408  for (block_x=0; block_x<size; block_x++)
1409  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1410  }
1411 }
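/* Editorial note: shift_output() is only needed when the stream's sample
 * precision is not a multiple of 8 (s->bits & 7); it left-shifts every sample
 * of the just-decoded block so the values fill the full 8- or 16-bit range of
 * the output pixel format. */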
1412 
1413 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1414  int Al, const uint8_t *mb_bitmask,
1415  int mb_bitmask_size,
1416  const AVFrame *reference)
1417 {
1418  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1419  uint8_t *data[MAX_COMPONENTS];
1420  const uint8_t *reference_data[MAX_COMPONENTS];
1421  int linesize[MAX_COMPONENTS];
1422  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1423  int bytes_per_pixel = 1 + (s->bits > 8);
1424 
1425  if (mb_bitmask) {
1426  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1427  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1428  return AVERROR_INVALIDDATA;
1429  }
1430  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1431  }
1432 
1433  s->restart_count = 0;
1434 
1435  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1436  &chroma_v_shift);
1437  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1438  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1439 
1440  for (i = 0; i < nb_components; i++) {
1441  int c = s->comp_index[i];
1442  data[c] = s->picture_ptr->data[c];
1443  reference_data[c] = reference ? reference->data[c] : NULL;
1444  linesize[c] = s->linesize[c];
1445  s->coefs_finished[c] |= 1;
1446  }
1447 
1448  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1449  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1450  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1451 
1452  if (s->restart_interval && !s->restart_count)
1453  s->restart_count = s->restart_interval;
1454 
1455  if (get_bits_left(&s->gb) < 0) {
1456  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1457  -get_bits_left(&s->gb));
1458  return AVERROR_INVALIDDATA;
1459  }
1460  for (i = 0; i < nb_components; i++) {
1461  uint8_t *ptr;
1462  int n, h, v, x, y, c, j;
1463  int block_offset;
1464  n = s->nb_blocks[i];
1465  c = s->comp_index[i];
1466  h = s->h_scount[i];
1467  v = s->v_scount[i];
1468  x = 0;
1469  y = 0;
1470  for (j = 0; j < n; j++) {
1471  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1472  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1473 
1474  if (s->interlaced && s->bottom_field)
1475  block_offset += linesize[c] >> 1;
1476  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1477  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1478  ptr = data[c] + block_offset;
1479  } else
1480  ptr = NULL;
1481  if (!s->progressive) {
1482  if (copy_mb) {
1483  if (ptr)
1484  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1485  linesize[c], s->avctx->lowres);
1486 
1487  } else {
1488  s->bdsp.clear_block(s->block);
1489  if (decode_block(s, s->block, i,
1490  s->dc_index[i], s->ac_index[i],
1491  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1492  av_log(s->avctx, AV_LOG_ERROR,
1493  "error y=%d x=%d\n", mb_y, mb_x);
1494  return AVERROR_INVALIDDATA;
1495  }
1496  if (ptr) {
1497  s->idsp.idct_put(ptr, linesize[c], s->block);
1498  if (s->bits & 7)
1499  shift_output(s, ptr, linesize[c]);
1500  }
1501  }
1502  } else {
1503  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1504  (h * mb_x + x);
1505  int16_t *block = s->blocks[c][block_idx];
1506  if (Ah)
1507  block[0] += get_bits1(&s->gb) *
1508  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1509  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1510  s->quant_matrixes[s->quant_sindex[i]],
1511  Al) < 0) {
1512  av_log(s->avctx, AV_LOG_ERROR,
1513  "error y=%d x=%d\n", mb_y, mb_x);
1514  return AVERROR_INVALIDDATA;
1515  }
1516  }
1517  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1518  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1519  mb_x, mb_y, x, y, c, s->bottom_field,
1520  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1521  if (++x == h) {
1522  x = 0;
1523  y++;
1524  }
1525  }
1526  }
1527 
1528  handle_rstn(s, nb_components);
1529  }
1530  }
1531  return 0;
1532 }
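/* Editorial note: mb_bitmask/reference implement a simple conditional
 * replenishment mode: a cleared bit means the macroblock is copied unchanged
 * from the reference frame instead of being decoded. In the FFmpeg tree this
 * path is exercised by wrappers built on top of this decoder (the MxPEG
 * decoder, for instance), not by plain MJPEG streams. */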
1533 
1534 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1535  int se, int Ah, int Al)
1536 {
1537  int mb_x, mb_y;
1538  int EOBRUN = 0;
1539  int c = s->comp_index[0];
1540  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1541 
1542  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1543  if (se < ss || se > 63) {
1544  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1545  return AVERROR_INVALIDDATA;
1546  }
1547 
1548  // s->coefs_finished is a bitmask for coefficients coded
1549  // ss and se are parameters telling start and end coefficients
1550  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1551 
1552  s->restart_count = 0;
1553 
1554  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1555  int block_idx = mb_y * s->block_stride[c];
1556  int16_t (*block)[64] = &s->blocks[c][block_idx];
1557  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1558  if (get_bits_left(&s->gb) <= 0) {
1559  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1560  return AVERROR_INVALIDDATA;
1561  }
1562  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1563  int ret;
1564  if (s->restart_interval && !s->restart_count)
1565  s->restart_count = s->restart_interval;
1566 
1567  if (Ah)
1568  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1569  quant_matrix, ss, se, Al, &EOBRUN);
1570  else
1571  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1572  quant_matrix, ss, se, Al, &EOBRUN);
1573  if (ret < 0) {
1574  av_log(s->avctx, AV_LOG_ERROR,
1575  "error y=%d x=%d\n", mb_y, mb_x);
1576  return AVERROR_INVALIDDATA;
1577  }
1578 
1579  if (handle_rstn(s, 0))
1580  EOBRUN = 0;
1581  }
1582  }
1583  return 0;
1584 }
1585 
1586 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1587 {
1588  int mb_x, mb_y;
1589  int c;
1590  const int bytes_per_pixel = 1 + (s->bits > 8);
1591  const int block_size = s->lossless ? 1 : 8;
1592 
1593  for (c = 0; c < s->nb_components; c++) {
1594  uint8_t *data = s->picture_ptr->data[c];
1595  int linesize = s->linesize[c];
1596  int h = s->h_max / s->h_count[c];
1597  int v = s->v_max / s->v_count[c];
1598  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1599  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1600 
1601  if (~s->coefs_finished[c])
1602  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1603 
1604  if (s->interlaced && s->bottom_field)
1605  data += linesize >> 1;
1606 
1607  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1608  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1609  int block_idx = mb_y * s->block_stride[c];
1610  int16_t (*block)[64] = &s->blocks[c][block_idx];
1611  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1612  s->idsp.idct_put(ptr, linesize, *block);
1613  if (s->bits & 7)
1614  shift_output(s, ptr, linesize);
1615  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1616  }
1617  }
1618  }
1619 }
1620 
1621 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1622  int mb_bitmask_size, const AVFrame *reference)
1623 {
1624  int len, nb_components, i, h, v, predictor, point_transform;
1625  int index, id, ret;
1626  const int block_size = s->lossless ? 1 : 8;
1627  int ilv, prev_shift;
1628 
1629  if (!s->got_picture) {
1630  av_log(s->avctx, AV_LOG_WARNING,
1631  "Can not process SOS before SOF, skipping\n");
1632  return -1;
1633  }
1634 
1635  if (reference) {
1636  if (reference->width != s->picture_ptr->width ||
1637  reference->height != s->picture_ptr->height ||
1638  reference->format != s->picture_ptr->format) {
1639  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1640  return AVERROR_INVALIDDATA;
1641  }
1642  }
1643 
1644  /* XXX: verify len field validity */
1645  len = get_bits(&s->gb, 16);
1646  nb_components = get_bits(&s->gb, 8);
1647  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1648  av_log(s->avctx, AV_LOG_ERROR,
1649  "decode_sos: nb_components (%d)",
1650  nb_components);
1651  return AVERROR_PATCHWELCOME;
1652  }
1653  if (len != 6 + 2 * nb_components) {
1654  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1655  return AVERROR_INVALIDDATA;
1656  }
1657  for (i = 0; i < nb_components; i++) {
1658  id = get_bits(&s->gb, 8) - 1;
1659  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1660  /* find component index */
1661  for (index = 0; index < s->nb_components; index++)
1662  if (id == s->component_id[index])
1663  break;
1664  if (index == s->nb_components) {
1665  av_log(s->avctx, AV_LOG_ERROR,
1666  "decode_sos: index(%d) out of components\n", index);
1667  return AVERROR_INVALIDDATA;
1668  }
1669  /* Metasoft MJPEG codec has Cb and Cr swapped */
1670  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1671  && nb_components == 3 && s->nb_components == 3 && i)
1672  index = 3 - i;
1673 
1674  s->quant_sindex[i] = s->quant_index[index];
1675  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1676  s->h_scount[i] = s->h_count[index];
1677  s->v_scount[i] = s->v_count[index];
1678 
1679  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1680  index = (index+2)%3;
1681 
1682  s->comp_index[i] = index;
1683 
1684  s->dc_index[i] = get_bits(&s->gb, 4);
1685  s->ac_index[i] = get_bits(&s->gb, 4);
1686 
1687  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1688  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1689  goto out_of_range;
1690  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1691  goto out_of_range;
1692  }
1693 
1694  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1695  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1696  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1697  prev_shift = get_bits(&s->gb, 4); /* Ah */
1698  point_transform = get_bits(&s->gb, 4); /* Al */
1699  }else
1700  prev_shift = point_transform = 0;
1701 
1702  if (nb_components > 1) {
1703  /* interleaved stream */
1704  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1705  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1706  } else if (!s->ls) { /* skip this for JPEG-LS */
1707  h = s->h_max / s->h_scount[0];
1708  v = s->v_max / s->v_scount[0];
1709  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1710  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1711  s->nb_blocks[0] = 1;
1712  s->h_scount[0] = 1;
1713  s->v_scount[0] = 1;
1714  }
1715 
1716  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1717  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1718  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1719  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1720  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1721 
1722 
1723  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1724  for (i = s->mjpb_skiptosod; i > 0; i--)
1725  skip_bits(&s->gb, 8);
1726 
1727 next_field:
1728  for (i = 0; i < nb_components; i++)
1729  s->last_dc[i] = (4 << s->bits);
1730 
1731  if (s->avctx->hwaccel) {
1732  int bytes_to_start = get_bits_count(&s->gb) / 8;
1733  av_assert0(bytes_to_start >= 0 &&
1734  s->raw_scan_buffer_size >= bytes_to_start);
1735 
1736  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1737  s->raw_scan_buffer + bytes_to_start,
1738  s->raw_scan_buffer_size - bytes_to_start);
1739  if (ret < 0)
1740  return ret;
1741 
1742  } else if (s->lossless) {
1743  av_assert0(s->picture_ptr == s->picture);
1744  if (CONFIG_JPEGLS_DECODER && s->ls) {
1745 // for () {
1746 // reset_ls_coding_parameters(s, 0);
1747 
1748  if ((ret = ff_jpegls_decode_picture(s, predictor,
1749  point_transform, ilv)) < 0)
1750  return ret;
1751  } else {
1752  if (s->rgb || s->bayer) {
1753  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1754  return ret;
1755  } else {
1756  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1757  point_transform,
1758  nb_components)) < 0)
1759  return ret;
1760  }
1761  }
1762  } else {
1763  if (s->progressive && predictor) {
1764  av_assert0(s->picture_ptr == s->picture);
1765  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1766  ilv, prev_shift,
1767  point_transform)) < 0)
1768  return ret;
1769  } else {
1770  if ((ret = mjpeg_decode_scan(s, nb_components,
1771  prev_shift, point_transform,
1772  mb_bitmask, mb_bitmask_size, reference)) < 0)
1773  return ret;
1774  }
1775  }
1776 
1777  if (s->interlaced &&
1778  get_bits_left(&s->gb) > 32 &&
1779  show_bits(&s->gb, 8) == 0xFF) {
1780  GetBitContext bak = s->gb;
1781  align_get_bits(&bak);
1782  if (show_bits(&bak, 16) == 0xFFD1) {
1783  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1784  s->gb = bak;
1785  skip_bits(&s->gb, 16);
1786  s->bottom_field ^= 1;
1787 
1788  goto next_field;
1789  }
1790  }
1791 
1792  emms_c();
1793  return 0;
1794  out_of_range:
1795  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1796  return AVERROR_INVALIDDATA;
1797 }
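/* Editorial note: the next_field loop above handles AVRn-style interlacing,
 * where both fields are stored in a single scan separated by an RST1 marker:
 * after the first field decodes, the marker is consumed, bottom_field is
 * toggled and the scan loop runs again for the second field. */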
1798 
1799 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1800 {
1801  if (get_bits(&s->gb, 16) != 4)
1802  return AVERROR_INVALIDDATA;
1803  s->restart_interval = get_bits(&s->gb, 16);
1804  s->restart_count = 0;
1805  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1806  s->restart_interval);
1807 
1808  return 0;
1809 }
1810 
1811 static int mjpeg_decode_app(MJpegDecodeContext *s)
1812 {
1813  int len, id, i;
1814 
1815  len = get_bits(&s->gb, 16);
1816  if (len < 6) {
1817  if (s->bayer) {
1818  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1819  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1820  skip_bits(&s->gb, len);
1821  return 0;
1822  } else
1823  return AVERROR_INVALIDDATA;
1824  }
1825  if (8 * len > get_bits_left(&s->gb))
1826  return AVERROR_INVALIDDATA;
1827 
1828  id = get_bits_long(&s->gb, 32);
1829  len -= 6;
1830 
1831  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1832  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1833  av_fourcc2str(av_bswap32(id)), id, len);
1834 
1835  /* Buggy AVID, it puts EOI only at every 10th frame. */
1836  /* Also, this fourcc is used by non-avid files too, it holds some
1837  information, but it's always present in AVID-created files. */
1838  if (id == AV_RB32("AVI1")) {
1839  /* structure:
1840  4bytes AVI1
1841  1bytes polarity
1842  1bytes always zero
1843  4bytes field_size
1844  4bytes field_size_less_padding
1845  */
1846  s->buggy_avid = 1;
1847  i = get_bits(&s->gb, 8); len--;
1848  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1849  goto out;
1850  }
1851 
1852  if (id == AV_RB32("JFIF")) {
1853  int t_w, t_h, v1, v2;
1854  if (len < 8)
1855  goto out;
1856  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1857  v1 = get_bits(&s->gb, 8);
1858  v2 = get_bits(&s->gb, 8);
1859  skip_bits(&s->gb, 8);
1860 
1861  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1862  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1863  if ( s->avctx->sample_aspect_ratio.num <= 0
1864  || s->avctx->sample_aspect_ratio.den <= 0) {
1865  s->avctx->sample_aspect_ratio.num = 0;
1866  s->avctx->sample_aspect_ratio.den = 1;
1867  }
1868 
1869  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1870  av_log(s->avctx, AV_LOG_INFO,
1871  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1872  v1, v2,
1873  s->avctx->sample_aspect_ratio.num,
1874  s->avctx->sample_aspect_ratio.den);
1875 
1876  len -= 8;
1877  if (len >= 2) {
1878  t_w = get_bits(&s->gb, 8);
1879  t_h = get_bits(&s->gb, 8);
1880  if (t_w && t_h) {
1881  /* skip thumbnail */
1882  if (len -10 - (t_w * t_h * 3) > 0)
1883  len -= t_w * t_h * 3;
1884  }
1885  len -= 2;
1886  }
1887  goto out;
1888  }
1889 
1890  if ( id == AV_RB32("Adob")
1891  && len >= 7
1892  && show_bits(&s->gb, 8) == 'e'
1893  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1894  skip_bits(&s->gb, 8); /* 'e' */
1895  skip_bits(&s->gb, 16); /* version */
1896  skip_bits(&s->gb, 16); /* flags0 */
1897  skip_bits(&s->gb, 16); /* flags1 */
1898  s->adobe_transform = get_bits(&s->gb, 8);
1899  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1900  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1901  len -= 7;
1902  goto out;
1903  }
1904 
1905  if (id == AV_RB32("LJIF")) {
1906  int rgb = s->rgb;
1907  int pegasus_rct = s->pegasus_rct;
1908  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1909  av_log(s->avctx, AV_LOG_INFO,
1910  "Pegasus lossless jpeg header found\n");
1911  skip_bits(&s->gb, 16); /* version ? */
1912  skip_bits(&s->gb, 16); /* unknown always 0? */
1913  skip_bits(&s->gb, 16); /* unknown always 0? */
1914  skip_bits(&s->gb, 16); /* unknown always 0? */
1915  switch (i=get_bits(&s->gb, 8)) {
1916  case 1:
1917  rgb = 1;
1918  pegasus_rct = 0;
1919  break;
1920  case 2:
1921  rgb = 1;
1922  pegasus_rct = 1;
1923  break;
1924  default:
1925  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1926  }
1927 
1928  len -= 9;
1929  if (s->got_picture)
1930  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1931  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1932  goto out;
1933  }
1934 
1935  s->rgb = rgb;
1936  s->pegasus_rct = pegasus_rct;
1937 
1938  goto out;
1939  }
1940  if (id == AV_RL32("colr") && len > 0) {
1941  s->colr = get_bits(&s->gb, 8);
1942  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1943  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1944  len --;
1945  goto out;
1946  }
1947  if (id == AV_RL32("xfrm") && len > 0) {
1948  s->xfrm = get_bits(&s->gb, 8);
1949  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1950  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1951  len --;
1952  goto out;
1953  }
1954 
1955  /* JPS extension by VRex */
1956  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1957  int flags, layout, type;
1958  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1959  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1960 
1961  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1962  skip_bits(&s->gb, 16); len -= 2; /* block length */
1963  skip_bits(&s->gb, 8); /* reserved */
1964  flags = get_bits(&s->gb, 8);
1965  layout = get_bits(&s->gb, 8);
1966  type = get_bits(&s->gb, 8);
1967  len -= 4;
1968 
1969  av_freep(&s->stereo3d);
1970  s->stereo3d = av_stereo3d_alloc();
1971  if (!s->stereo3d) {
1972  goto out;
1973  }
1974  if (type == 0) {
1975  s->stereo3d->type = AV_STEREO3D_2D;
1976  } else if (type == 1) {
1977  switch (layout) {
1978  case 0x01:
1979  s->stereo3d->type = AV_STEREO3D_LINES;
1980  break;
1981  case 0x02:
1982  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1983  break;
1984  case 0x03:
1985  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1986  break;
1987  }
1988  if (!(flags & 0x04)) {
1989  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1990  }
1991  }
1992  goto out;
1993  }
1994 
1995  /* EXIF metadata */
1996  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
1997  GetByteContext gbytes;
1998  int ret, le, ifd_offset, bytes_read;
1999  const uint8_t *aligned;
2000 
2001  skip_bits(&s->gb, 16); // skip padding
2002  len -= 2;
2003 
2004  // init byte wise reading
2005  aligned = align_get_bits(&s->gb);
2006  bytestream2_init(&gbytes, aligned, len);
2007 
2008  // read TIFF header
2009  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2010  if (ret) {
2011  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2012  } else {
2013  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2014 
2015  // read 0th IFD and store the metadata
2016  // (return values > 0 indicate the presence of subimage metadata)
2017  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2018  if (ret < 0) {
2019  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2020  }
2021  }
2022 
2023  bytes_read = bytestream2_tell(&gbytes);
2024  skip_bits(&s->gb, bytes_read << 3);
2025  len -= bytes_read;
2026 
2027  goto out;
2028  }
2029 
2030  /* Apple MJPEG-A */
2031  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2032  id = get_bits_long(&s->gb, 32);
2033  len -= 4;
2034  /* Apple MJPEG-A */
2035  if (id == AV_RB32("mjpg")) {
2036  /* structure:
2037  4bytes field size
2038  4bytes pad field size
2039  4bytes next off
2040  4bytes quant off
2041  4bytes huff off
2042  4bytes image off
2043  4bytes scan off
2044  4bytes data off
2045  */
2046  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2047  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2048  }
2049  }
2050 
2051  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2052  int id2;
2053  unsigned seqno;
2054  unsigned nummarkers;
2055 
2056  id = get_bits_long(&s->gb, 32);
2057  id2 = get_bits(&s->gb, 24);
2058  len -= 7;
2059  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2060  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2061  goto out;
2062  }
2063 
2064  skip_bits(&s->gb, 8);
2065  seqno = get_bits(&s->gb, 8);
2066  len -= 2;
2067  if (seqno == 0) {
2068  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2069  goto out;
2070  }
2071 
2072  nummarkers = get_bits(&s->gb, 8);
2073  len -= 1;
2074  if (nummarkers == 0) {
2075  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2076  goto out;
2077  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2078  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2079  goto out;
2080  } else if (seqno > nummarkers) {
2081  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2082  goto out;
2083  }
2084 
2085  /* Allocate if this is the first APP2 we've seen. */
2086  if (s->iccnum == 0) {
2087  s->iccdata = av_mallocz(nummarkers * sizeof(*(s->iccdata)));
2088  s->iccdatalens = av_mallocz(nummarkers * sizeof(*(s->iccdatalens)));
2089  if (!s->iccdata || !s->iccdatalens) {
2090  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2091  return AVERROR(ENOMEM);
2092  }
2093  s->iccnum = nummarkers;
2094  }
2095 
2096  if (s->iccdata[seqno - 1]) {
2097  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2098  goto out;
2099  }
2100 
2101  s->iccdatalens[seqno - 1] = len;
2102  s->iccdata[seqno - 1] = av_malloc(len);
2103  if (!s->iccdata[seqno - 1]) {
2104  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2105  return AVERROR(ENOMEM);
2106  }
2107 
2108  memcpy(s->iccdata[seqno - 1], align_get_bits(&s->gb), len);
2109  skip_bits(&s->gb, len << 3);
2110  len = 0;
2111  s->iccread++;
2112 
2113  if (s->iccread > s->iccnum)
2114  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2115  }
2116 
2117 out:
2118  /* slow but needed for extreme adobe jpegs */
2119  if (len < 0)
2120  av_log(s->avctx, AV_LOG_ERROR,
2121  "mjpeg: error, decode_app parser read over the end\n");
2122  while (--len > 0)
2123  skip_bits(&s->gb, 8);
2124 
2125  return 0;
2126 }
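/* [Editorial sketch -- not part of mjpegdec.c] The JFIF branch above pulls the
 * APP0 payload apart with the bit reader.  For reference, the same byte layout
 * parsed from a plain buffer looks like the helper below; the struct and
 * function names are invented for this illustration and only <stdint.h> and
 * <stddef.h> are assumed. */
#include <stdint.h>
#include <stddef.h>

typedef struct JFIFHeader {
    int version_major, version_minor;
    int density_units;         /* 0: no units (aspect ratio only), 1: dpi, 2: dpcm */
    int x_density, y_density;  /* become the sample aspect ratio when units == 0 */
    int thumb_w, thumb_h;      /* an uncompressed RGB thumbnail may follow */
} JFIFHeader;

/* 'p' points just past the 5-byte "JFIF\0" identifier, 'len' is the number of
 * payload bytes remaining.  Returns 0 on success, -1 if the segment is short. */
static int parse_jfif_app0(const uint8_t *p, size_t len, JFIFHeader *h)
{
    if (len < 9)
        return -1;
    h->version_major = p[0];
    h->version_minor = p[1];
    h->density_units = p[2];
    h->x_density     = (p[3] << 8) | p[4];  /* big endian, like get_bits(&s->gb, 16) above */
    h->y_density     = (p[5] << 8) | p[6];
    h->thumb_w       = p[7];
    h->thumb_h       = p[8];
    return 0;
}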
2127 
2128 static int mjpeg_decode_com(MJpegDecodeContext *s)
2129 {
2130  int len = get_bits(&s->gb, 16);
2131  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2132  int i;
2133  char *cbuf = av_malloc(len - 1);
2134  if (!cbuf)
2135  return AVERROR(ENOMEM);
2136 
2137  for (i = 0; i < len - 2; i++)
2138  cbuf[i] = get_bits(&s->gb, 8);
2139  if (i > 0 && cbuf[i - 1] == '\n')
2140  cbuf[i - 1] = 0;
2141  else
2142  cbuf[i] = 0;
2143 
2144  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2145  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2146 
2147  /* buggy avid, it puts EOI only at every 10th frame */
2148  if (!strncmp(cbuf, "AVID", 4)) {
2149  parse_avid(s, cbuf, len);
2150  } else if (!strcmp(cbuf, "CS=ITU601"))
2151  s->cs_itu601 = 1;
2152  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2153  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2154  s->flipped = 1;
2155  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2156  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2157  s->multiscope = 2;
2158  }
2159 
2160  av_free(cbuf);
2161  }
2162 
2163  return 0;
2164 }
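/* [Editorial sketch -- not part of mjpegdec.c] mjpeg_decode_com() above reads a
 * COM segment: a 2-byte big-endian length that counts itself, followed by
 * length - 2 bytes of text.  A standalone view of that layout, with an invented
 * helper name and assuming <string.h>/<stdint.h>/<stddef.h>: */
static int read_jpeg_comment(const uint8_t *seg, size_t seg_size,
                             char *out, size_t out_size)
{
    size_t len, n;

    if (seg_size < 2 || !out_size)
        return -1;
    len = ((size_t)seg[0] << 8) | seg[1];      /* includes the length field itself */
    if (len < 2 || len > seg_size)
        return -1;
    len -= 2;                                  /* comment bytes that follow */
    n = len < out_size - 1 ? len : out_size - 1;
    memcpy(out, seg + 2, n);
    out[n] = '\0';
    return (int)len;
}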
2165 
2166 /* return the 8 bit start code value and update the search
2167  state. Return -1 if no start code found */
2168 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2169 {
2170  const uint8_t *buf_ptr;
2171  unsigned int v, v2;
2172  int val;
2173  int skipped = 0;
2174 
2175  buf_ptr = *pbuf_ptr;
2176  while (buf_end - buf_ptr > 1) {
2177  v = *buf_ptr++;
2178  v2 = *buf_ptr;
2179  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2180  val = *buf_ptr++;
2181  goto found;
2182  }
2183  skipped++;
2184  }
2185  buf_ptr = buf_end;
2186  val = -1;
2187 found:
2188  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2189  *pbuf_ptr = buf_ptr;
2190  return val;
2191 }
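/* [Editorial sketch -- not part of mjpegdec.c] Illustrative use of
 * find_marker(): walk a buffer and report every marker code in the
 * SOF0 (0xC0) .. COM (0xFE) range.  Assumes <stdio.h>; after each call,
 * p points just past the marker code byte. */
static void list_markers(const uint8_t *buf, size_t size)
{
    const uint8_t *p   = buf;
    const uint8_t *end = buf + size;
    int code;

    while ((code = find_marker(&p, end)) >= 0)
        printf("marker 0xFF%02X, next payload byte at offset %td\n",
               code, p - buf);
}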
2192 
2193 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2194  const uint8_t **buf_ptr, const uint8_t *buf_end,
2195  const uint8_t **unescaped_buf_ptr,
2196  int *unescaped_buf_size)
2197 {
2198  int start_code;
2199  start_code = find_marker(buf_ptr, buf_end);
2200 
2201  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2202  if (!s->buffer)
2203  return AVERROR(ENOMEM);
2204 
2205  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2206  if (start_code == SOS && !s->ls) {
2207  const uint8_t *src = *buf_ptr;
2208  const uint8_t *ptr = src;
2209  uint8_t *dst = s->buffer;
2210 
2211  #define copy_data_segment(skip) do { \
2212  ptrdiff_t length = (ptr - src) - (skip); \
2213  if (length > 0) { \
2214  memcpy(dst, src, length); \
2215  dst += length; \
2216  src = ptr; \
2217  } \
2218  } while (0)
2219 
2220  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2221  ptr = buf_end;
2222  copy_data_segment(0);
2223  } else {
2224  while (ptr < buf_end) {
2225  uint8_t x = *(ptr++);
2226 
2227  if (x == 0xff) {
2228  ptrdiff_t skip = 0;
2229  while (ptr < buf_end && x == 0xff) {
2230  x = *(ptr++);
2231  skip++;
2232  }
2233 
2234  /* 0xFF, 0xFF, ... */
2235  if (skip > 1) {
2236  copy_data_segment(skip);
2237 
2238  /* decrement src as it is equal to ptr after the
2239  * copy_data_segment macro and we might want to
2240  * copy the current value of x later on */
2241  src--;
2242  }
2243 
2244  if (x < RST0 || x > RST7) {
2245  copy_data_segment(1);
2246  if (x)
2247  break;
2248  }
2249  }
2250  }
2251  if (src < ptr)
2252  copy_data_segment(0);
2253  }
2254  #undef copy_data_segment
2255 
2256  *unescaped_buf_ptr = s->buffer;
2257  *unescaped_buf_size = dst - s->buffer;
2258  memset(s->buffer + *unescaped_buf_size, 0,
2259  AV_INPUT_BUFFER_PADDING_SIZE);
2260 
2261  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2262  (buf_end - *buf_ptr) - (dst - s->buffer));
2263  } else if (start_code == SOS && s->ls) {
2264  const uint8_t *src = *buf_ptr;
2265  uint8_t *dst = s->buffer;
2266  int bit_count = 0;
2267  int t = 0, b = 0;
2268  PutBitContext pb;
2269 
2270  /* find marker */
2271  while (src + t < buf_end) {
2272  uint8_t x = src[t++];
2273  if (x == 0xff) {
2274  while ((src + t < buf_end) && x == 0xff)
2275  x = src[t++];
2276  if (x & 0x80) {
2277  t -= FFMIN(2, t);
2278  break;
2279  }
2280  }
2281  }
2282  bit_count = t * 8;
2283  init_put_bits(&pb, dst, t);
2284 
2285  /* unescape bitstream */
2286  while (b < t) {
2287  uint8_t x = src[b++];
2288  put_bits(&pb, 8, x);
2289  if (x == 0xFF && b < t) {
2290  x = src[b++];
2291  if (x & 0x80) {
2292  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2293  x &= 0x7f;
2294  }
2295  put_bits(&pb, 7, x);
2296  bit_count--;
2297  }
2298  }
2299  flush_put_bits(&pb);
2300 
2301  *unescaped_buf_ptr = dst;
2302  *unescaped_buf_size = (bit_count + 7) >> 3;
2303  memset(s->buffer + *unescaped_buf_size, 0,
2304  AV_INPUT_BUFFER_PADDING_SIZE);
2305  } else {
2306  *unescaped_buf_ptr = *buf_ptr;
2307  *unescaped_buf_size = buf_end - *buf_ptr;
2308  }
2309 
2310  return start_code;
2311 }
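/* [Editorial sketch -- not part of mjpegdec.c] The SOS branch above removes
 * JPEG byte stuffing: inside entropy-coded data an encoder emits 0xFF 0x00 for
 * a literal 0xFF byte, and only restart markers (0xFFD0..0xFFD7) may appear
 * before the next real marker.  A minimal standalone version of that rule is
 * sketched below; unlike the code above it simply drops restart markers
 * instead of keeping them for the scan decoder, and it ignores the THP and
 * JPEG-LS special cases. */
static size_t unstuff_scan(const uint8_t *src, size_t size, uint8_t *dst)
{
    size_t i = 0, o = 0;

    while (i < size) {
        uint8_t x = src[i++];
        if (x != 0xFF) {
            dst[o++] = x;
            continue;
        }
        if (i >= size)
            break;
        if (src[i] == 0x00) {                 /* stuffed 0xFF: keep it, drop the 0x00 */
            dst[o++] = 0xFF;
            i++;
        } else if (src[i] >= 0xD0 && src[i] <= 0xD7) {
            i++;                              /* restart marker: skip both bytes */
        } else {
            break;                            /* any other marker ends the scan */
        }
    }
    return o;                                 /* number of unescaped payload bytes */
}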
2312 
2313 static void reset_icc_profile(MJpegDecodeContext *s)
2314 {
2315  int i;
2316 
2317  if (s->iccdata)
2318  for (i = 0; i < s->iccnum; i++)
2319  av_freep(&s->iccdata[i]);
2320  av_freep(&s->iccdata);
2321  av_freep(&s->iccdatalens);
2322 
2323  s->iccread = 0;
2324  s->iccnum = 0;
2325 }
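/* [Editorial sketch -- not part of mjpegdec.c] ICC profiles larger than one
 * marker segment are split across several APP2 segments.  Each chunk starts
 * with the 12-byte identifier "ICC_PROFILE\0", then a 1-based sequence number
 * and the total chunk count -- the fields mjpeg_decode_app() checks before
 * storing a chunk in s->iccdata[] and ff_mjpeg_decode_frame() later
 * concatenates.  Standalone view of that header (names invented here, assumes
 * <string.h>): */
typedef struct ICCChunkHeader {
    unsigned seqno;        /* 1 .. nummarkers */
    unsigned nummarkers;   /* total number of APP2 chunks for this profile */
} ICCChunkHeader;

/* 'p'/'len' cover the APP2 payload after the 2-byte segment length.
 * Returns the offset of the profile bytes inside the payload, or -1. */
static int parse_icc_chunk_header(const uint8_t *p, size_t len, ICCChunkHeader *h)
{
    if (len < 14 || memcmp(p, "ICC_PROFILE", 12))   /* 12 bytes incl. the NUL */
        return -1;
    h->seqno      = p[12];
    h->nummarkers = p[13];
    if (!h->seqno || !h->nummarkers || h->seqno > h->nummarkers)
        return -1;
    return 14;
}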
2326 
2327 int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2328  AVPacket *avpkt)
2329 {
2330  AVFrame *frame = data;
2331  const uint8_t *buf = avpkt->data;
2332  int buf_size = avpkt->size;
2333  MJpegDecodeContext *s = avctx->priv_data;
2334  const uint8_t *buf_end, *buf_ptr;
2335  const uint8_t *unescaped_buf_ptr;
2336  int hshift, vshift;
2337  int unescaped_buf_size;
2338  int start_code;
2339  int i, index;
2340  int ret = 0;
2341  int is16bit;
2342 
2343  s->buf_size = buf_size;
2344 
2345  av_dict_free(&s->exif_metadata);
2346  av_freep(&s->stereo3d);
2347  s->adobe_transform = -1;
2348 
2349  if (s->iccnum != 0)
2350  reset_icc_profile(s);
2351 
2352  buf_ptr = buf;
2353  buf_end = buf + buf_size;
2354  while (buf_ptr < buf_end) {
2355  /* find start next marker */
2356  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2357  &unescaped_buf_ptr,
2358  &unescaped_buf_size);
2359  /* EOF */
2360  if (start_code < 0) {
2361  break;
2362  } else if (unescaped_buf_size > INT_MAX / 8) {
2363  av_log(avctx, AV_LOG_ERROR,
2364  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2365  start_code, unescaped_buf_size, buf_size);
2366  return AVERROR_INVALIDDATA;
2367  }
2368  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2369  start_code, buf_end - buf_ptr);
2370 
2371  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2372 
2373  if (ret < 0) {
2374  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2375  goto fail;
2376  }
2377 
2378  s->start_code = start_code;
2379  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2380  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2381 
2382  /* process markers */
2383  if (start_code >= RST0 && start_code <= RST7) {
2384  av_log(avctx, AV_LOG_DEBUG,
2385  "restart marker: %d\n", start_code & 0x0f);
2386  /* APP fields */
2387  } else if (start_code >= APP0 && start_code <= APP15) {
2388  if ((ret = mjpeg_decode_app(s)) < 0)
2389  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2390  av_err2str(ret));
2391  /* Comment */
2392  } else if (start_code == COM) {
2393  ret = mjpeg_decode_com(s);
2394  if (ret < 0)
2395  return ret;
2396  } else if (start_code == DQT) {
2397  ret = ff_mjpeg_decode_dqt(s);
2398  if (ret < 0)
2399  return ret;
2400  }
2401 
2402  ret = -1;
2403 
2404  if (!CONFIG_JPEGLS_DECODER &&
2405  (start_code == SOF48 || start_code == LSE)) {
2406  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2407  return AVERROR(ENOSYS);
2408  }
2409 
2410  if (avctx->skip_frame == AVDISCARD_ALL) {
2411  switch(start_code) {
2412  case SOF0:
2413  case SOF1:
2414  case SOF2:
2415  case SOF3:
2416  case SOF48:
2417  case SOI:
2418  case SOS:
2419  case EOI:
2420  break;
2421  default:
2422  goto skip;
2423  }
2424  }
2425 
2426  switch (start_code) {
2427  case SOI:
2428  s->restart_interval = 0;
2429  s->restart_count = 0;
2430  s->raw_image_buffer = buf_ptr;
2431  s->raw_image_buffer_size = buf_end - buf_ptr;
2432  /* nothing to do on SOI */
2433  break;
2434  case DHT:
2435  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2436  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2437  goto fail;
2438  }
2439  break;
2440  case SOF0:
2441  case SOF1:
2442  if (start_code == SOF0)
2443  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2444  else
2445  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2446  s->lossless = 0;
2447  s->ls = 0;
2448  s->progressive = 0;
2449  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2450  goto fail;
2451  break;
2452  case SOF2:
2453  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2454  s->lossless = 0;
2455  s->ls = 0;
2456  s->progressive = 1;
2457  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2458  goto fail;
2459  break;
2460  case SOF3:
2461  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2462  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2463  s->lossless = 1;
2464  s->ls = 0;
2465  s->progressive = 0;
2466  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2467  goto fail;
2468  break;
2469  case SOF48:
2470  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2471  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2472  s->lossless = 1;
2473  s->ls = 1;
2474  s->progressive = 0;
2475  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2476  goto fail;
2477  break;
2478  case LSE:
2479  if (!CONFIG_JPEGLS_DECODER ||
2480  (ret = ff_jpegls_decode_lse(s)) < 0)
2481  goto fail;
2482  break;
2483  case EOI:
2484 eoi_parser:
2485  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2486  s->progressive && s->cur_scan && s->got_picture)
2487  mjpeg_idct_scan_progressive_ac(s);
2488  s->cur_scan = 0;
2489  if (!s->got_picture) {
2490  av_log(avctx, AV_LOG_WARNING,
2491  "Found EOI before any SOF, ignoring\n");
2492  break;
2493  }
2494  if (s->interlaced) {
2495  s->bottom_field ^= 1;
2496  /* if not bottom field, do not output image yet */
2497  if (s->bottom_field == !s->interlace_polarity)
2498  break;
2499  }
2500  if (avctx->skip_frame == AVDISCARD_ALL) {
2501  s->got_picture = 0;
2502  goto the_end_no_picture;
2503  }
2504  if (s->avctx->hwaccel) {
2505  ret = s->avctx->hwaccel->end_frame(s->avctx);
2506  if (ret < 0)
2507  return ret;
2508 
2509  av_freep(&s->hwaccel_picture_private);
2510  }
2511  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2512  return ret;
2513  *got_frame = 1;
2514  s->got_picture = 0;
2515 
2516  if (!s->lossless) {
2517  int qp = FFMAX3(s->qscale[0],
2518  s->qscale[1],
2519  s->qscale[2]);
2520  int qpw = (s->width + 15) / 16;
2521  AVBufferRef *qp_table_buf = av_buffer_alloc(qpw);
2522  if (qp_table_buf) {
2523  memset(qp_table_buf->data, qp, qpw);
2524  av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1);
2525  }
2526 
2527  if(avctx->debug & FF_DEBUG_QP)
2528  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2529  }
2530 
2531  goto the_end;
2532  case SOS:
2533  s->raw_scan_buffer = buf_ptr;
2534  s->raw_scan_buffer_size = buf_end - buf_ptr;
2535 
2536  s->cur_scan++;
2537  if (avctx->skip_frame == AVDISCARD_ALL) {
2538  skip_bits(&s->gb, get_bits_left(&s->gb));
2539  break;
2540  }
2541 
2542  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2543  (avctx->err_recognition & AV_EF_EXPLODE))
2544  goto fail;
2545  break;
2546  case DRI:
2547  if ((ret = mjpeg_decode_dri(s)) < 0)
2548  return ret;
2549  break;
2550  case SOF5:
2551  case SOF6:
2552  case SOF7:
2553  case SOF9:
2554  case SOF10:
2555  case SOF11:
2556  case SOF13:
2557  case SOF14:
2558  case SOF15:
2559  case JPG:
2560  av_log(avctx, AV_LOG_ERROR,
2561  "mjpeg: unsupported coding type (%x)\n", start_code);
2562  break;
2563  }
2564 
2565 skip:
2566  /* eof process start code */
2567  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2568  av_log(avctx, AV_LOG_DEBUG,
2569  "marker parser used %d bytes (%d bits)\n",
2570  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2571  }
2572  if (s->got_picture && s->cur_scan) {
2573  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2574  goto eoi_parser;
2575  }
2576  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2577  return AVERROR_INVALIDDATA;
2578 fail:
2579  s->got_picture = 0;
2580  return ret;
2581 the_end:
2582 
2583  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2584 
2585  if (AV_RB32(s->upscale_h)) {
2586  int p;
2587  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2588  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2589  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2590  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2591  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2592  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2593  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2594  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2595  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2596  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2597  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2598  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2599  );
2600  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2601  if (ret)
2602  return ret;
2603 
2604  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->avctx->pix_fmt));
2605  for (p = 0; p<s->nb_components; p++) {
2606  uint8_t *line = s->picture_ptr->data[p];
2607  int w = s->width;
2608  int h = s->height;
2609  if (!s->upscale_h[p])
2610  continue;
2611  if (p==1 || p==2) {
2612  w = AV_CEIL_RSHIFT(w, hshift);
2613  h = AV_CEIL_RSHIFT(h, vshift);
2614  }
2615  if (s->upscale_v[p] == 1)
2616  h = (h+1)>>1;
2617  av_assert0(w > 0);
2618  for (i = 0; i < h; i++) {
2619  if (s->upscale_h[p] == 1) {
2620  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2621  else line[w - 1] = line[(w - 1) / 2];
2622  for (index = w - 2; index > 0; index--) {
2623  if (is16bit)
2624  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2625  else
2626  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2627  }
2628  } else if (s->upscale_h[p] == 2) {
2629  if (is16bit) {
2630  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2631  if (w > 1)
2632  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2633  } else {
2634  line[w - 1] = line[(w - 1) / 3];
2635  if (w > 1)
2636  line[w - 2] = line[w - 1];
2637  }
2638  for (index = w - 3; index > 0; index--) {
2639  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2640  }
2641  }
2642  line += s->linesize[p];
2643  }
2644  }
2645  }
2646  if (AV_RB32(s->upscale_v)) {
2647  int p;
2648  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2649  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2651  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2654  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2655  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2656  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2657  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2658  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2659  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2660  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2661  );
2662  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2663  if (ret)
2664  return ret;
2665 
2666  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->avctx->pix_fmt));
2667  for (p = 0; p < s->nb_components; p++) {
2668  uint8_t *dst;
2669  int w = s->width;
2670  int h = s->height;
2671  if (!s->upscale_v[p])
2672  continue;
2673  if (p==1 || p==2) {
2674  w = AV_CEIL_RSHIFT(w, hshift);
2675  h = AV_CEIL_RSHIFT(h, vshift);
2676  }
2677  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2678  for (i = h - 1; i; i--) {
2679  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2680  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2681  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2682  memcpy(dst, src1, w);
2683  } else {
2684  for (index = 0; index < w; index++)
2685  dst[index] = (src1[index] + src2[index]) >> 1;
2686  }
2687  dst -= s->linesize[p];
2688  }
2689  }
2690  }
2691  if (s->flipped && !s->rgb) {
2692  int j;
2693  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2694  if (ret)
2695  return ret;
2696 
2697  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->avctx->pix_fmt));
2698  for (index=0; index<s->nb_components; index++) {
2699  uint8_t *dst = s->picture_ptr->data[index];
2700  int w = s->picture_ptr->width;
2701  int h = s->picture_ptr->height;
2702  if(index && index<3){
2703  w = AV_CEIL_RSHIFT(w, hshift);
2704  h = AV_CEIL_RSHIFT(h, vshift);
2705  }
2706  if(dst){
2707  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2708  for (i=0; i<h/2; i++) {
2709  for (j=0; j<w; j++)
2710  FFSWAP(int, dst[j], dst2[j]);
2711  dst += s->picture_ptr->linesize[index];
2712  dst2 -= s->picture_ptr->linesize[index];
2713  }
2714  }
2715  }
2716  }
2717  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2718  int w = s->picture_ptr->width;
2719  int h = s->picture_ptr->height;
2720  av_assert0(s->nb_components == 4);
2721  for (i=0; i<h; i++) {
2722  int j;
2723  uint8_t *dst[4];
2724  for (index=0; index<4; index++) {
2725  dst[index] = s->picture_ptr->data[index]
2726  + s->picture_ptr->linesize[index]*i;
2727  }
2728  for (j=0; j<w; j++) {
2729  int k = dst[3][j];
2730  int r = dst[0][j] * k;
2731  int g = dst[1][j] * k;
2732  int b = dst[2][j] * k;
2733  dst[0][j] = g*257 >> 16;
2734  dst[1][j] = b*257 >> 16;
2735  dst[2][j] = r*257 >> 16;
2736  dst[3][j] = 255;
2737  }
2738  }
2739  }
2740  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2741  int w = s->picture_ptr->width;
2742  int h = s->picture_ptr->height;
2743  av_assert0(s->nb_components == 4);
2744  for (i=0; i<h; i++) {
2745  int j;
2746  uint8_t *dst[4];
2747  for (index=0; index<4; index++) {
2748  dst[index] = s->picture_ptr->data[index]
2749  + s->picture_ptr->linesize[index]*i;
2750  }
2751  for (j=0; j<w; j++) {
2752  int k = dst[3][j];
2753  int r = (255 - dst[0][j]) * k;
2754  int g = (128 - dst[1][j]) * k;
2755  int b = (128 - dst[2][j]) * k;
2756  dst[0][j] = r*257 >> 16;
2757  dst[1][j] = (g*257 >> 16) + 128;
2758  dst[2][j] = (b*257 >> 16) + 128;
2759  dst[3][j] = 255;
2760  }
2761  }
2762  }
2763 
2764  if (s->stereo3d) {
2765  AVStereo3D *stereo = av_stereo3d_create_side_data(data);
2766  if (stereo) {
2767  stereo->type = s->stereo3d->type;
2768  stereo->flags = s->stereo3d->flags;
2769  }
2770  av_freep(&s->stereo3d);
2771  }
2772 
2773  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2774  AVFrameSideData *sd;
2775  size_t offset = 0;
2776  int total_size = 0;
2777  int i;
2778 
2779  /* Sum size of all parts. */
2780  for (i = 0; i < s->iccnum; i++)
2781  total_size += s->iccdatalens[i];
2782 
2783  sd = av_frame_new_side_data(data, AV_FRAME_DATA_ICC_PROFILE, total_size);
2784  if (!sd) {
2785  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2786  return AVERROR(ENOMEM);
2787  }
2788 
2789  /* Reassemble the parts, which are now in-order. */
2790  for (i = 0; i < s->iccnum; i++) {
2791  memcpy(sd->data + offset, s->iccdata[i], s->iccdatalens[i]);
2792  offset += s->iccdatalens[i];
2793  }
2794  }
2795 
2796  av_dict_copy(&((AVFrame *) data)->metadata, s->exif_metadata, 0);
2797  av_dict_free(&s->exif_metadata);
2798 
2799 the_end_no_picture:
2800  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2801  buf_end - buf_ptr);
2802 // return buf_end - buf_ptr;
2803  return buf_ptr - buf;
2804 }
2805 
2806 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2807 {
2808  MJpegDecodeContext *s = avctx->priv_data;
2809  int i, j;
2810 
2811  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2812  av_log(avctx, AV_LOG_INFO, "Single field\n");
2813  }
2814 
2815  if (s->picture) {
2816  av_frame_free(&s->picture);
2817  s->picture_ptr = NULL;
2818  } else if (s->picture_ptr)
2819  av_frame_unref(s->picture_ptr);
2820 
2821  av_freep(&s->buffer);
2822  av_freep(&s->stereo3d);
2823  av_freep(&s->ljpeg_buffer);
2824  s->ljpeg_buffer_size = 0;
2825 
2826  for (i = 0; i < 3; i++) {
2827  for (j = 0; j < 4; j++)
2828  ff_free_vlc(&s->vlcs[i][j]);
2829  }
2830  for (i = 0; i < MAX_COMPONENTS; i++) {
2831  av_freep(&s->blocks[i]);
2832  av_freep(&s->last_nnz[i]);
2833  }
2834  av_dict_free(&s->exif_metadata);
2835 
2836  reset_icc_profile(s);
2837 
2838  av_freep(&s->hwaccel_picture_private);
2839 
2840  return 0;
2841 }
2842 
2843 static void decode_flush(AVCodecContext *avctx)
2844 {
2845  MJpegDecodeContext *s = avctx->priv_data;
2846  s->got_picture = 0;
2847 }
2848 
2849 #if CONFIG_MJPEG_DECODER
2850 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2851 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2852 static const AVOption options[] = {
2853  { "extern_huff", "Use external huffman table.",
2854  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2855  { NULL },
2856 };
2857 
2858 static const AVClass mjpegdec_class = {
2859  .class_name = "MJPEG decoder",
2860  .item_name = av_default_item_name,
2861  .option = options,
2862  .version = LIBAVUTIL_VERSION_INT,
2863 };
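/* [Editorial sketch -- not part of mjpegdec.c] The "extern_huff" option
 * declared in options[] above is reachable from user code through the usual
 * AVDictionary mechanism at open time; everything below is generic libavcodec
 * API (assumes <libavcodec/avcodec.h> and <libavutil/dict.h>, helper name
 * invented for this example). */
static int open_mjpeg_with_extern_huff(AVCodecContext *ctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "extern_huff", "1", 0);   /* name matches options[] above */
    ret = avcodec_open2(ctx, codec, &opts);      /* consumed entries are removed */
    av_dict_free(&opts);
    return ret;
}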
2864 
2865 AVCodec ff_mjpeg_decoder = {
2866  .name = "mjpeg",
2867  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2868  .type = AVMEDIA_TYPE_VIDEO,
2869  .id = AV_CODEC_ID_MJPEG,
2870  .priv_data_size = sizeof(MJpegDecodeContext),
2871  .init = ff_mjpeg_decode_init,
2872  .close = ff_mjpeg_decode_end,
2873  .decode = ff_mjpeg_decode_frame,
2874  .flush = decode_flush,
2875  .capabilities = AV_CODEC_CAP_DR1,
2876  .max_lowres = 3,
2877  .priv_class = &mjpegdec_class,
2878  .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2879  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2880  FF_CODEC_CAP_INIT_CLEANUP,
2881  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2882 #if CONFIG_MJPEG_NVDEC_HWACCEL
2883  HWACCEL_NVDEC(mjpeg),
2884 #endif
2885 #if CONFIG_MJPEG_VAAPI_HWACCEL
2886  HWACCEL_VAAPI(mjpeg),
2887 #endif
2888  NULL
2889  },
2890 };
2891 #endif
2892 #if CONFIG_THP_DECODER
2893 AVCodec ff_thp_decoder = {
2894  .name = "thp",
2895  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2896  .type = AVMEDIA_TYPE_VIDEO,
2897  .id = AV_CODEC_ID_THP,
2898  .priv_data_size = sizeof(MJpegDecodeContext),
2899  .init = ff_mjpeg_decode_init,
2900  .close = ff_mjpeg_decode_end,
2901  .decode = ff_mjpeg_decode_frame,
2902  .flush = decode_flush,
2903  .capabilities = AV_CODEC_CAP_DR1,
2904  .max_lowres = 3,
2905  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2906 };
2907 #endif
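/* [Editorial sketch -- not part of mjpegdec.c] Minimal example of driving this
 * decoder through the public libavcodec API.  Error handling is reduced to the
 * essentials, and the input buffer is assumed to hold one complete JPEG image
 * with the padding libavcodec expects at the end of input buffers. */
#include <libavcodec/avcodec.h>

static int decode_one_jpeg(const uint8_t *buf, int size, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVPacket *pkt        = av_packet_alloc();
    int ret;

    if (!codec || !ctx || !pkt) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    pkt->data = (uint8_t *)buf;            /* the packet borrows the caller's buffer */
    pkt->size = size;
    if ((ret = avcodec_send_packet(ctx, pkt)) < 0)
        goto end;
    ret = avcodec_receive_frame(ctx, out); /* one JPEG in, one AVFrame out */

end:
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return ret;
}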
int block_stride[MAX_COMPONENTS]
Definition: mjpegdec.h:86
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:55
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1292
const struct AVCodec * codec
Definition: avcodec.h:535
const AVPixFmtDescriptor * pix_desc
!< stereoscopic information (cached, since it is read before frame allocation)
Definition: mjpegdec.h:136
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
Definition: mjpeg.h:81
int v_count[MAX_COMPONENTS]
Definition: mjpegdec.h:89
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:273
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:122
AVOption.
Definition: opt.h:248
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
enum AVPixelFormat hwaccel_sw_pix_fmt
Definition: mjpegdec.h:152
Definition: mjpeg.h:71
Definition: mjpeg.h:111
Definition: mjpeg.h:73
float re
Definition: fft.c:82
Definition: mjpeg.h:40
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
Definition: mjpeg.h:42
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:389
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:518
size_t raw_image_buffer_size
Definition: mjpegdec.h:145
void(* clear_block)(int16_t *block)
Definition: blockdsp.h:36
#define avpriv_request_sample(...)
int h_scount[MAX_COMPONENTS]
Definition: mjpegdec.h:94
BlockDSPContext bdsp
Definition: mjpegdec.h:111
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:200
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2128
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
TIFF constants & data structures.
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:270
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int num
Numerator.
Definition: rational.h:59
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
int qscale[4]
quantizer scale calculated from quant_matrixes
Definition: mjpegdec.h:58
int size
Definition: packet.h:364
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
uint8_t * buffer
Definition: mjpegdec.h:54
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:905
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
#define copy_data_segment(skip)
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
GLint GLenum type
Definition: opengl_enc.c:104
Definition: mjpeg.h:68
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:72
int dc_index[MAX_COMPONENTS]
Definition: mjpegdec.h:91
Definition: mjpeg.h:75
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
Definition: mjpeg.h:53
int linesize[MAX_COMPONENTS]
linesize << interlaced
Definition: mjpegdec.h:103
discard all
Definition: avcodec.h:236
uint8_t permutated[64]
Definition: idctdsp.h:33
Views are next to each other.
Definition: stereo3d.h:67
uint8_t upscale_v[4]
Definition: mjpegdec.h:70
uint8_t run
Definition: svq3.c:204
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:796
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
int profile
profile
Definition: avcodec.h:1859
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
AVCodec.
Definition: codec.h:190
EXIF metadata parser.
JPEG-LS decoder.
MJPEG encoder and decoder.
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1958
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int comp_index[MAX_COMPONENTS]
Definition: mjpegdec.h:90
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2313
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1586
HpelDSPContext hdsp
Definition: mjpegdec.h:112
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:649
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1956
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
const uint8_t * raw_image_buffer
Definition: mjpegdec.h:144
int16_t block[64]
Definition: mjpegdec.h:105
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
Definition: mjpeg.h:72
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:88
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1799
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVOptions.
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:176
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
uint16_t(* ljpeg_buffer)[4]
Definition: mjpegdec.h:128
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
Definition: mjpeg.h:46
unsigned int ljpeg_buffer_size
Definition: mjpegdec.h:129
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:456
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:2192
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1960
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
Definition: mjpeg.h:54
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
uint8_t * last_nnz[MAX_COMPONENTS]
Definition: mjpegdec.h:107
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
AVFrame * picture_ptr
Definition: mjpegdec.h:101
Structure to hold side data for an AVFrame.
Definition: frame.h:214
#define height
uint8_t * data
Definition: packet.h:363
int quant_sindex[MAX_COMPONENTS]
Definition: mjpegdec.h:96
#define MAX_COMPONENTS
Definition: mjpegdec.h:44
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
Definition: pixfmt.h:100
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1959
int h_count[MAX_COMPONENTS]
Definition: mjpegdec.h:88
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define ff_dlog(a,...)
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:390
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:412
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:455
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1765
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
ptrdiff_t size
Definition: opengl_enc.c:100
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:441
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
#define av_log(a,...)
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
static int aligned(int val)
Definition: dashdec.c:171
#define src
Definition: vp8dsp.c:254
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2327
enum AVCodecID id
Definition: codec.h:204
AVDictionary * exif_metadata
Definition: mjpegdec.h:132
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:863
uint8_t ** iccdata
Definition: mjpegdec.h:138
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1027
static const uint16_t mask[17]
Definition: lzw.c:38
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:962
#define PTRDIFF_SPECIFIER
Definition: internal.h:228
int nb_blocks[MAX_COMPONENTS]
Definition: mjpegdec.h:93
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2806
VLC vlcs[3][4]
Definition: mjpegdec.h:57
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:119
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
const char * r
Definition: vf_curves.c:114
unsigned int pos
Definition: spdifenc.c:410
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:443
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your local see the OFFSET() macro
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
Definition: graph2dot.c:48
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: codec.h:197
uint8_t bits
Definition: vp3data.h:202
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1413
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2168
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:123
Definition: mjpeg.h:39
Definition: mjpeg.h:70
Definition: vlc.h:26
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
Definition: mjpegdec.c:53
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
JPEG-LS.
Definition: mjpeg.h:103
Definition: mjpeg.h:79
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:317
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
ScanTable scantable
Definition: mjpegdec.h:110
Definition: mjpeg.h:80
#define b
Definition: input.c:41
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1382
Definition: mjpeg.h:56
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:305
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:391
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:418
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:383
#define FFMIN(a, b)
Definition: common.h:96
Definition: mjpeg.h:44
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
uint8_t interlaced
Definition: mxfenc.c:2168
#define width
int component_id[MAX_COMPONENTS]
Definition: mjpegdec.h:87
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1811
#define NEG_USR32(a, s)
Definition: mathops.h:166
uint8_t w
Definition: llviddspenc.c:38
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
uint8_t raw_huffman_lengths[2][4][16]
Definition: mjpegdec.h:149
Definition: mjpeg.h:41
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1957
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
int quant_index[4]
Definition: mjpegdec.h:98
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
int v_scount[MAX_COMPONENTS]
Definition: mjpegdec.h:95
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
GetBitContext gb
Definition: mjpegdec.h:49
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:72
HW acceleration through CUDA.
Definition: pixfmt.h:235
#define ZERO_RUN
Definition: mjpegdec.c:944
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
#define FF_ARRAY_ELEMS(a)
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:538
int bits
Definition: vlc.h:27
if(ret)
static const float pred[4]
Definition: siprdata.h:259
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:410
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:381
AVCodec ff_mjpeg_decoder
IDCTDSPContext idsp
Definition: mjpegdec.h:113
#define src1
Definition: h264pred.c:139
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
#define av_bswap32
Definition: bswap.h:33
Libavcodec external API header.
Views are on top of each other.
Definition: stereo3d.h:79
Definition: mjpeg.h:52
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:87
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2511
enum AVCodecID codec_id
Definition: avcodec.h:536
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
int debug
debug
Definition: avcodec.h:1611
AVStereo3D * stereo3d
Definition: mjpegdec.h:134
main external API structure.
Definition: avcodec.h:526
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new 
state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
uint8_t * data
The data buffer.
Definition: buffer.h:89
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:551
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1872
uint8_t * data
Definition: frame.h:216
Definition: snow.txt:206
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
int extradata_size
Definition: avcodec.h:628
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:130
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:348
int coded_height
Definition: avcodec.h:714
Describe the class of an AVClass context structure.
Definition: log.h:67
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
static const AVProfile profiles[]
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:739
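A minimal, hypothetical sketch of attaching frame side data with av_frame_new_side_data(); the display-matrix type and the 90-degree rotation are example values chosen for illustration, not something this file necessarily does:

#include "libavutil/frame.h"
#include "libavutil/display.h"

/* Attach a 3x3 display matrix (rotation metadata) to an already decoded frame. */
AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                             sizeof(int32_t) * 9);
if (!sd)
    return AVERROR(ENOMEM);
av_display_rotation_set((int32_t *)sd->data, 90.0);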
int index
Definition: gxfenc.c:89
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:778
int ac_index[MAX_COMPONENTS]
Definition: mjpegdec.h:92
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
Rational number (pair of numerator and denominator).
Definition: rational.h:58
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1062
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
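For orientation, a hedged sketch of the GetBitContext reader API used throughout the decoder; buf and buf_size are assumed to be a valid byte buffer and its length in bytes:

#include "get_bits.h"

GetBitContext gb;
int ret = init_get_bits(&gb, buf, buf_size * 8); /* size is given in bits        */
if (ret < 0)
    return ret;

int marker = get_bits(&gb, 16);       /* read a 16-bit field                     */
int flag   = get_bits1(&gb);          /* read a single bit                       */
skip_bits(&gb, 4);                    /* discard 4 bits                          */
unsigned v = get_bits_long(&gb, 26);  /* reads of 0-32 bits go through get_bits_long() */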
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
Definition: mjpeg.h:45
uint64_t coefs_finished[MAX_COMPONENTS]
bitmask of which coefs have been completely decoded (progressive mode)
Definition: mjpegdec.h:108
Definition: mjpeg.h:48
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
enum AVPixelFormat hwaccel_pix_fmt
Definition: mjpegdec.h:153
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:300
uint8_t raw_huffman_values[2][4][256]
Definition: mjpegdec.h:150
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1534
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
#define MIN_CACHE_BITS
Definition: get_bits.h:128
Definition: mjpeg.h:47
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:566
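A small illustrative frame lifecycle, assuming nothing beyond the public AVFrame API:

#include "libavutil/frame.h"

AVFrame *frame = av_frame_alloc();
if (!frame)
    return AVERROR(ENOMEM);

/* ... fill or decode into frame ... */

av_frame_unref(frame);  /* drop the data buffer references and reset the fields */
av_frame_free(&frame);  /* release the AVFrame structure itself                 */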
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
JPEG-LS extension parameters.
Definition: mjpeg.h:104
#define flags(name, subs,...)
Definition: cbs_av1.c:560
size_t raw_scan_buffer_size
Definition: mjpegdec.h:147
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2500
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
uint8_t level
Definition: svq3.c:205
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1621
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:537
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2472
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:139
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:845
Definition: mjpeg.h:94
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:163
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1228
A reference to a data buffer.
Definition: buffer.h:81
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
const OptionDef options[]
Definition: ffmpeg_opt.c:3393
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:115
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: internal.h:60
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
#define FF_DEBUG_QP
Definition: avcodec.h:1616
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
Definition: pixfmt.h:80
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:2191
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
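A hedged sketch of the PutBitContext writer, pairing init_put_bits() with flush_put_bits(); the buffer size and the values written are arbitrary:

#include "put_bits.h"

uint8_t out[64];
PutBitContext pb;

init_put_bits(&pb, out, sizeof(out)); /* buffer size is in bytes          */
put_bits(&pb, 16, 0xFFD8);            /* write a 16-bit value             */
put_bits(&pb, 4,  7);                 /* write a 4-bit value              */
flush_put_bits(&pb);                  /* zero-pad up to the next byte     */

int bytes_used = put_bits_count(&pb) >> 3;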
int den
Denominator.
Definition: rational.h:60
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
Definition: avcodec.h:215
static int lowres
Definition: ffplay.c:336
const uint8_t * raw_scan_buffer
Definition: mjpegdec.h:146
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
AVCodecContext * avctx
Definition: mjpegdec.h:48
void * priv_data
Definition: avcodec.h:553
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
#define av_free(p)
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1625
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1398
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:460
int got_picture
we found a SOF and picture is valid, too.
Definition: mjpegdec.h:102
int len
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2520
int16_t(*[MAX_COMPONENTS] blocks)[64]
intermediate sums (progressive mode)
Definition: mjpegdec.h:106
AVFrame * picture
Definition: mjpegdec.h:100
void * hwaccel_picture_private
Definition: mjpegdec.h:154
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
The official guide to swscale for confused developers.
Definition: swscale.txt:2
Definition: mjpeg.h:50
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:386
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
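An illustrative GetByteContext snippet; buf and buf_size are assumed to describe a valid buffer, and the 8-byte offset is arbitrary:

#include <stdio.h>      /* SEEK_SET */
#include "bytestream.h"

GetByteContext gb;
bytestream2_init(&gb, buf, buf_size);

bytestream2_seek(&gb, 8, SEEK_SET);         /* reposition; the offset is clamped to the buffer */
unsigned field = bytestream2_get_le32(&gb); /* then read a 32-bit little-endian value          */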
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
Definition: pixfmt.h:258
int last_dc[MAX_COMPONENTS]
Definition: mjpegdec.h:99
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:76
#define REFINE_BIT(j)
Definition: mjpegdec.c:936
uint8_t upscale_h[4]
Definition: mjpegdec.h:69
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2843
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the first IFD in *ifd_offset accordingly.
Definition: tiff_common.c:261
int height
Definition: frame.h:366
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1183
#define av_always_inline
Definition: attributes.h:45
static const uint8_t start_code[]
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:560
Definition: mjpeg.h:82
#define VD
Definition: cuviddec.c:1071
#define FFSWAP(type, a, b)
Definition: common.h:99
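FFSWAP() exchanges two values of the given type through a temporary; a tiny assumed example:

#include "libavutil/common.h"

int a = 1, b = 2;
FFSWAP(int, a, b);  /* afterwards a == 2 and b == 1; the same macro swaps pointers,
                       e.g. FFSWAP(AVFrame *, cur, prev) for frame references */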
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2193
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:92
MJPEG decoder.
#define MKTAG(a, b, c, d)
Definition: common.h:406
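MKTAG() packs four characters into a 32-bit fourcc at compile time; an assumed comparison against a container-supplied codec tag might look like this (avctx is assumed to be an AVCodecContext provided by the caller):

#include "avcodec.h"
#include "libavutil/common.h"

if (avctx->codec_tag == MKTAG('m', 'j', 'p', 'b')) {
    /* the container tagged the stream as "mjpb" (illustrative check only) */
}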
AVCodec ff_thp_decoder
Definition: mjpeg.h:61
enum AVCodecID id
AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
Definition: rpzaenc.c:58
uint16_t quant_matrixes[4][64]
Definition: mjpegdec.h:56
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:358
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:411
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define FFMAX3(a, b, c)
Definition: common.h:95
GLuint buffer
Definition: opengl_enc.c:101
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
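AV_CEIL_RSHIFT() is a right shift that rounds up, which is the usual way to derive chroma plane sizes from luma sizes; this sketch assumes avctx holds an already configured AVCodecContext:

#include "avcodec.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"

const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
/* e.g. for 4:2:0, log2_chroma_w == log2_chroma_h == 1, so odd sizes round up */
int chroma_w = AV_CEIL_RSHIFT(avctx->width,  desc->log2_chroma_w);
int chroma_h = AV_CEIL_RSHIFT(avctx->height, desc->log2_chroma_h);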
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
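A minimal allocation sketch in the spirit of the progressive-mode block buffers listed above; nb_blocks is an assumed element count, and the buffer is released with av_freep():

#include "libavutil/mem.h"

/* One zero-initialized 64-coefficient block per element. */
int16_t (*blocks)[64] = av_mallocz_array(nb_blocks, sizeof(*blocks));
if (!blocks)
    return AVERROR(ENOMEM);

/* ... use blocks[i][coef] ... */

av_freep(&blocks);  /* frees the array and sets the pointer to NULL */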
Definition: mjpeg.h:49
bitstream writer API