1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "hwaccel.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "jpegtables.h"
43 #include "mjpeg.h"
44 #include "mjpegdec.h"
45 #include "jpeglsdec.h"
46 #include "profiles.h"
47 #include "put_bits.h"
48 #include "tiff.h"
49 #include "exif.h"
50 #include "bytestream.h"
51 
52 
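/* Build a VLC table from DHT-style (bit-length counts, values) data.
 * For AC tables the stored symbols are offset by 16 so run/size pairs can be
 * split with shifts later; AC symbol 0 (EOB) is remapped to 16 * 256. */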
53 static int build_vlc(VLC *vlc, const uint8_t *bits_table,
54  const uint8_t *val_table, int nb_codes,
55  int use_static, int is_ac)
56 {
57  uint8_t huff_size[256] = { 0 };
58  uint16_t huff_code[256];
59  uint16_t huff_sym[256];
60  int i;
61 
62  av_assert0(nb_codes <= 256);
63 
64  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
65 
66  for (i = 0; i < 256; i++)
67  huff_sym[i] = i + 16 * is_ac;
68 
69  if (is_ac)
70  huff_sym[0] = 16 * 256;
71 
72  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
73  huff_code, 2, 2, huff_sym, 2, 2, use_static);
74 }
75 
76 static int init_default_huffman_tables(MJpegDecodeContext *s)
77 {
78  static const struct {
79  int class;
80  int index;
81  const uint8_t *bits;
82  const uint8_t *values;
83  int codes;
84  int length;
85  } ht[] = {
86  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
87  avpriv_mjpeg_val_dc, 12, 12 },
88  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
89  avpriv_mjpeg_val_dc, 12, 12 },
90  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
91  avpriv_mjpeg_val_ac_luminance, 251, 162 },
92  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
93  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
94  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
95  avpriv_mjpeg_val_ac_luminance, 251, 162 },
96  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
97  avpriv_mjpeg_val_ac_chrominance, 251, 162 },
98  };
99  int i, ret;
100 
101  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
102  ret = build_vlc(&s->vlcs[ht[i].class][ht[i].index],
103  ht[i].bits, ht[i].values, ht[i].codes,
104  0, ht[i].class == 1);
105  if (ret < 0)
106  return ret;
107 
108  if (ht[i].class < 2) {
109  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
110  ht[i].bits + 1, 16);
111  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
112  ht[i].values, ht[i].length);
113  }
114  }
115 
116  return 0;
117 }
118 
119 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
120 {
121  s->buggy_avid = 1;
122  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
123  s->interlace_polarity = 1;
124  if (len > 14 && buf[12] == 2) /* 2 - PAL */
125  s->interlace_polarity = 0;
126  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
127  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
128 }
129 
130 static void init_idct(AVCodecContext *avctx)
131 {
132  MJpegDecodeContext *s = avctx->priv_data;
133 
134  ff_idctdsp_init(&s->idsp, avctx);
135  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
136  ff_zigzag_direct);
137 }
138 
139 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
140 {
141  MJpegDecodeContext *s = avctx->priv_data;
142  int ret;
143 
144  if (!s->picture_ptr) {
145  s->picture = av_frame_alloc();
146  if (!s->picture)
147  return AVERROR(ENOMEM);
148  s->picture_ptr = s->picture;
149  }
150 
151  s->avctx = avctx;
152  ff_blockdsp_init(&s->bdsp, avctx);
153  ff_hpeldsp_init(&s->hdsp, avctx->flags);
154  init_idct(avctx);
155  s->buffer_size = 0;
156  s->buffer = NULL;
157  s->start_code = -1;
158  s->first_picture = 1;
159  s->got_picture = 0;
160  s->org_height = avctx->coded_height;
161  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
162  avctx->colorspace = AVCOL_SPC_BT470BG;
163  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
164 
165  if ((ret = init_default_huffman_tables(s)) < 0)
166  return ret;
167 
168  if (s->extern_huff) {
169  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
170  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
171  return ret;
172  if (ff_mjpeg_decode_dht(s)) {
173  av_log(avctx, AV_LOG_ERROR,
174  "error using external huffman table, switching back to internal\n");
175  init_default_huffman_tables(s);
176  }
177  }
178  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
179  s->interlace_polarity = 1; /* bottom field first */
180  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
181  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
182  if (avctx->codec_tag == AV_RL32("MJPG"))
183  s->interlace_polarity = 1;
184  }
185 
186  if ( avctx->extradata_size > 8
187  && AV_RL32(avctx->extradata) == 0x2C
188  && AV_RL32(avctx->extradata+4) == 0x18) {
189  parse_avid(s, avctx->extradata, avctx->extradata_size);
190  }
191 
192  if (avctx->codec->id == AV_CODEC_ID_AMV)
193  s->flipped = 1;
194 
195  return 0;
196 }
197 
198 
199 /* quantize tables */
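/* Each table in a DQT segment is a 4-bit precision flag, a 4-bit destination
 * index and 64 quantizer values of 8 or 16 bits; a rough qscale per table is
 * derived from the first-row/first-column AC quantizers. */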
200 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
201 {
202  int len, index, i;
203 
204  len = get_bits(&s->gb, 16) - 2;
205 
206  if (8*len > get_bits_left(&s->gb)) {
207  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
208  return AVERROR_INVALIDDATA;
209  }
210 
211  while (len >= 65) {
212  int pr = get_bits(&s->gb, 4);
213  if (pr > 1) {
214  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
215  return AVERROR_INVALIDDATA;
216  }
217  index = get_bits(&s->gb, 4);
218  if (index >= 4)
219  return -1;
220  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
221  /* read quant table */
222  for (i = 0; i < 64; i++) {
223  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
224  if (s->quant_matrixes[index][i] == 0) {
225  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
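/* Each DHT table is a 4-bit class (0=DC, 1=AC), a 4-bit index, 16 code-length
 * counts and the symbol values; class 2 holds a non-offset copy of the AC
 * tables used by the progressive decoder. */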
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v, code_max;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  code_max = 0;
274  for (i = 0; i < n; i++) {
275  v = get_bits(&s->gb, 8);
276  if (v > code_max)
277  code_max = v;
278  val_table[i] = v;
279  }
280  len -= n;
281 
282  /* build VLC and flush previous vlc if present */
283  ff_free_vlc(&s->vlcs[class][index]);
284  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
285  class, index, code_max + 1);
286  if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
287  code_max + 1, 0, class > 0)) < 0)
288  return ret;
289 
290  if (class > 0) {
291  ff_free_vlc(&s->vlcs[2][index]);
292  if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
293  code_max + 1, 0, 0)) < 0)
294  return ret;
295  }
296 
297  for (i = 0; i < 16; i++)
298  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
299  for (i = 0; i < 256; i++)
300  s->raw_huffman_values[class][index][i] = val_table[i];
301  }
302  return 0;
303 }
304 
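/* Start of frame: parses precision, dimensions and per-component sampling
 * factors, chooses the output pixel format and (re)allocates the frame and,
 * for progressive files, the coefficient buffers. */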
305 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
306 {
307  int len, nb_components, i, width, height, bits, ret, size_change;
308  unsigned pix_fmt_id;
309  int h_count[MAX_COMPONENTS] = { 0 };
310  int v_count[MAX_COMPONENTS] = { 0 };
311 
312  s->cur_scan = 0;
313  memset(s->upscale_h, 0, sizeof(s->upscale_h));
314  memset(s->upscale_v, 0, sizeof(s->upscale_v));
315 
316  len = get_bits(&s->gb, 16);
317  bits = get_bits(&s->gb, 8);
318 
319  if (bits > 16 || bits < 1) {
320  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
321  return AVERROR_INVALIDDATA;
322  }
323 
324  if (s->avctx->bits_per_raw_sample != bits) {
325  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
326  s->avctx->bits_per_raw_sample = bits;
327  init_idct(s->avctx);
328  }
329  if (s->pegasus_rct)
330  bits = 9;
331  if (bits == 9 && !s->pegasus_rct)
332  s->rct = 1; // FIXME ugly
333 
334  if(s->lossless && s->avctx->lowres){
335  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
336  return -1;
337  }
338 
339  height = get_bits(&s->gb, 16);
340  width = get_bits(&s->gb, 16);
341 
342  // HACK for odd_height.mov
343  if (s->interlaced && s->width == width && s->height == height + 1)
344  height= s->height;
345 
346  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
347  if (av_image_check_size(width, height, 0, s->avctx) < 0)
348  return AVERROR_INVALIDDATA;
349  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
350  return AVERROR_INVALIDDATA;
351 
352  nb_components = get_bits(&s->gb, 8);
353  if (nb_components <= 0 ||
354  nb_components > MAX_COMPONENTS)
355  return -1;
356  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
357  if (nb_components != s->nb_components) {
358  av_log(s->avctx, AV_LOG_ERROR,
359  "nb_components changing in interlaced picture\n");
360  return AVERROR_INVALIDDATA;
361  }
362  }
363  if (s->ls && !(bits <= 8 || nb_components == 1)) {
364  avpriv_report_missing_feature(s->avctx,
365  "JPEG-LS that is not <= 8 "
366  "bits/component or 16-bit gray");
367  return AVERROR_PATCHWELCOME;
368  }
369  if (len != 8 + 3 * nb_components) {
370  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
371  return AVERROR_INVALIDDATA;
372  }
373 
374  s->nb_components = nb_components;
375  s->h_max = 1;
376  s->v_max = 1;
377  for (i = 0; i < nb_components; i++) {
378  /* component id */
379  s->component_id[i] = get_bits(&s->gb, 8) - 1;
380  h_count[i] = get_bits(&s->gb, 4);
381  v_count[i] = get_bits(&s->gb, 4);
382  /* compute hmax and vmax (only used in interleaved case) */
383  if (h_count[i] > s->h_max)
384  s->h_max = h_count[i];
385  if (v_count[i] > s->v_max)
386  s->v_max = v_count[i];
387  s->quant_index[i] = get_bits(&s->gb, 8);
388  if (s->quant_index[i] >= 4) {
389  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
390  return AVERROR_INVALIDDATA;
391  }
392  if (!h_count[i] || !v_count[i]) {
393  av_log(s->avctx, AV_LOG_ERROR,
394  "Invalid sampling factor in component %d %d:%d\n",
395  i, h_count[i], v_count[i]);
396  return AVERROR_INVALIDDATA;
397  }
398 
399  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
400  i, h_count[i], v_count[i],
401  s->component_id[i], s->quant_index[i]);
402  }
403  if ( nb_components == 4
404  && s->component_id[0] == 'C' - 1
405  && s->component_id[1] == 'M' - 1
406  && s->component_id[2] == 'Y' - 1
407  && s->component_id[3] == 'K' - 1)
408  s->adobe_transform = 0;
409 
410  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
411  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
412  return AVERROR_PATCHWELCOME;
413  }
414 
415  if (s->bayer) {
416  if (nb_components == 2) {
417  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
418  width stored in their SOF3 markers is the width of each one. We only output
419  a single component, therefore we need to adjust the output image width. We
420  handle the deinterleaving (but not the debayering) in this file. */
421  width *= 2;
422  }
423  /* They can also contain 1 component, which is double the width and half the height
424  of the final image (rows are interleaved). We don't handle the decoding in this
425  file, but leave that to the TIFF/DNG decoder. */
426  }
427 
428  /* if different size, realloc/alloc picture */
429  if (width != s->width || height != s->height || bits != s->bits ||
430  memcmp(s->h_count, h_count, sizeof(h_count)) ||
431  memcmp(s->v_count, v_count, sizeof(v_count))) {
432  size_change = 1;
433 
434  s->width = width;
435  s->height = height;
436  s->bits = bits;
437  memcpy(s->h_count, h_count, sizeof(h_count));
438  memcpy(s->v_count, v_count, sizeof(v_count));
439  s->interlaced = 0;
440  s->got_picture = 0;
441 
442  /* test interlaced mode */
443  if (s->first_picture &&
444  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
445  s->org_height != 0 &&
446  s->height < ((s->org_height * 3) / 4)) {
447  s->interlaced = 1;
448  s->bottom_field = s->interlace_polarity;
449  s->picture_ptr->interlaced_frame = 1;
450  s->picture_ptr->top_field_first = !s->interlace_polarity;
451  height *= 2;
452  }
453 
454  ret = ff_set_dimensions(s->avctx, width, height);
455  if (ret < 0)
456  return ret;
457 
458  s->first_picture = 0;
459  } else {
460  size_change = 0;
461  }
462 
463  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
464  if (s->progressive) {
465  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
466  return AVERROR_INVALIDDATA;
467  }
468  } else {
469  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
470  s->rgb = 1;
471  else if (!s->lossless)
472  s->rgb = 0;
473  /* XXX: not complete test ! */
474  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
475  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
476  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
477  (s->h_count[3] << 4) | s->v_count[3];
478  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
479  /* NOTE we do not allocate pictures large enough for the possible
480  * padding of h/v_count being 4 */
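/* pix_fmt_id packs each component's h and v sampling factors into nibbles,
 * e.g. 2x2,1x1,1x1 (4:2:0) -> 0x22111100. The two subtractions below halve
 * the horizontal (or vertical) factors when they are all 0 or 2, so
 * equivalent samplings map to the same id. */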
481  if (!(pix_fmt_id & 0xD0D0D0D0))
482  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
483  if (!(pix_fmt_id & 0x0D0D0D0D))
484  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
485 
486  for (i = 0; i < 8; i++) {
487  int j = 6 + (i&1) - (i&6);
488  int is = (pix_fmt_id >> (4*i)) & 0xF;
489  int js = (pix_fmt_id >> (4*j)) & 0xF;
490 
491  if (is == 1 && js != 2 && (i < 2 || i > 5))
492  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
493  if (is == 1 && js != 2 && (i < 2 || i > 5))
494  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
495 
496  if (is == 1 && js == 2) {
497  if (i & 1) s->upscale_h[j/2] = 1;
498  else s->upscale_v[j/2] = 1;
499  }
500  }
501 
502  switch (pix_fmt_id) {
503  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
504  if (!s->bayer)
505  goto unk_pixfmt;
506  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
507  break;
508  case 0x11111100:
509  if (s->rgb)
510  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
511  else {
512  if ( s->adobe_transform == 0
513  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
514  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
515  } else {
516  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
517  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
518  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
519  }
520  }
521  av_assert0(s->nb_components == 3);
522  break;
523  case 0x11111111:
524  if (s->rgb)
525  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
526  else {
527  if (s->adobe_transform == 0 && s->bits <= 8) {
528  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
529  } else {
530  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
531  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
532  }
533  }
534  av_assert0(s->nb_components == 4);
535  break;
536  case 0x22111122:
537  case 0x22111111:
538  if (s->adobe_transform == 0 && s->bits <= 8) {
539  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
540  s->upscale_v[1] = s->upscale_v[2] = 1;
541  s->upscale_h[1] = s->upscale_h[2] = 1;
542  } else if (s->adobe_transform == 2 && s->bits <= 8) {
543  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
544  s->upscale_v[1] = s->upscale_v[2] = 1;
545  s->upscale_h[1] = s->upscale_h[2] = 1;
546  s->avctx->color_range = AVCOL_RANGE_JPEG;
547  } else {
548  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
549  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
550  s->avctx->color_range = AVCOL_RANGE_JPEG;
551  }
552  av_assert0(s->nb_components == 4);
553  break;
554  case 0x12121100:
555  case 0x22122100:
556  case 0x21211100:
557  case 0x22211200:
558  case 0x22221100:
559  case 0x22112200:
560  case 0x11222200:
561  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
562  else
563  goto unk_pixfmt;
564  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
565  break;
566  case 0x11000000:
567  case 0x13000000:
568  case 0x14000000:
569  case 0x31000000:
570  case 0x33000000:
571  case 0x34000000:
572  case 0x41000000:
573  case 0x43000000:
574  case 0x44000000:
575  if(s->bits <= 8)
576  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
577  else
578  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
579  break;
580  case 0x12111100:
581  case 0x14121200:
582  case 0x14111100:
583  case 0x22211100:
584  case 0x22112100:
585  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
586  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
587  else
588  goto unk_pixfmt;
589  s->upscale_v[0] = s->upscale_v[1] = 1;
590  } else {
591  if (pix_fmt_id == 0x14111100)
592  s->upscale_v[1] = s->upscale_v[2] = 1;
593  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
594  else
595  goto unk_pixfmt;
596  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
597  }
598  break;
599  case 0x21111100:
600  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
601  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
602  else
603  goto unk_pixfmt;
604  s->upscale_h[0] = s->upscale_h[1] = 1;
605  } else {
606  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
607  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
608  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
609  }
610  break;
611  case 0x31111100:
612  if (s->bits > 8)
613  goto unk_pixfmt;
614  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
615  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
616  s->upscale_h[1] = s->upscale_h[2] = 2;
617  break;
618  case 0x22121100:
619  case 0x22111200:
620  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
621  else
622  goto unk_pixfmt;
623  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
624  break;
625  case 0x22111100:
626  case 0x23111100:
627  case 0x42111100:
628  case 0x24111100:
629  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
630  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
631  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
632  if (pix_fmt_id == 0x42111100) {
633  if (s->bits > 8)
634  goto unk_pixfmt;
635  s->upscale_h[1] = s->upscale_h[2] = 1;
636  } else if (pix_fmt_id == 0x24111100) {
637  if (s->bits > 8)
638  goto unk_pixfmt;
639  s->upscale_v[1] = s->upscale_v[2] = 1;
640  } else if (pix_fmt_id == 0x23111100) {
641  if (s->bits > 8)
642  goto unk_pixfmt;
643  s->upscale_v[1] = s->upscale_v[2] = 2;
644  }
645  break;
646  case 0x41111100:
647  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
648  else
649  goto unk_pixfmt;
650  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
651  break;
652  default:
653  unk_pixfmt:
654  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
655  memset(s->upscale_h, 0, sizeof(s->upscale_h));
656  memset(s->upscale_v, 0, sizeof(s->upscale_v));
657  return AVERROR_PATCHWELCOME;
658  }
659  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
660  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
661  return AVERROR_PATCHWELCOME;
662  }
663  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
664  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
665  return AVERROR_PATCHWELCOME;
666  }
667  if (s->ls) {
668  memset(s->upscale_h, 0, sizeof(s->upscale_h));
669  memset(s->upscale_v, 0, sizeof(s->upscale_v));
670  if (s->nb_components == 3) {
671  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
672  } else if (s->nb_components != 1) {
673  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
674  return AVERROR_PATCHWELCOME;
675  } else if (s->palette_index && s->bits <= 8)
676  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
677  else if (s->bits <= 8)
678  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
679  else
680  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
681  }
682 
683  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
684  if (!s->pix_desc) {
685  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
686  return AVERROR_BUG;
687  }
688 
689  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
690  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
691  } else {
692  enum AVPixelFormat pix_fmts[] = {
693 #if CONFIG_MJPEG_NVDEC_HWACCEL
694  AV_PIX_FMT_CUDA,
695 #endif
696 #if CONFIG_MJPEG_VAAPI_HWACCEL
697  AV_PIX_FMT_VAAPI,
698 #endif
699  s->avctx->pix_fmt,
700  AV_PIX_FMT_NONE,
701  };
702  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
703  if (s->hwaccel_pix_fmt < 0)
704  return AVERROR(EINVAL);
705 
706  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
707  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
708  }
709 
710  if (s->avctx->skip_frame == AVDISCARD_ALL) {
711  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
712  s->picture_ptr->key_frame = 1;
713  s->got_picture = 1;
714  return 0;
715  }
716 
717  av_frame_unref(s->picture_ptr);
718  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
719  return -1;
720  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
721  s->picture_ptr->key_frame = 1;
722  s->got_picture = 1;
723 
724  for (i = 0; i < 4; i++)
725  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
726 
727  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
728  s->width, s->height, s->linesize[0], s->linesize[1],
729  s->interlaced, s->avctx->height);
730 
731  }
732 
733  if ((s->rgb && !s->lossless && !s->ls) ||
734  (!s->rgb && s->ls && s->nb_components > 1) ||
735  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
736  ) {
737  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
738  return AVERROR_PATCHWELCOME;
739  }
740 
741  /* totally blank picture as progressive JPEG will only add details to it */
742  if (s->progressive) {
743  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
744  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
745  for (i = 0; i < s->nb_components; i++) {
746  int size = bw * bh * s->h_count[i] * s->v_count[i];
747  av_freep(&s->blocks[i]);
748  av_freep(&s->last_nnz[i]);
749  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
750  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
751  if (!s->blocks[i] || !s->last_nnz[i])
752  return AVERROR(ENOMEM);
753  s->block_stride[i] = bw * s->h_count[i];
754  }
755  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
756  }
757 
758  if (s->avctx->hwaccel) {
759  s->hwaccel_picture_private =
760  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
761  if (!s->hwaccel_picture_private)
762  return AVERROR(ENOMEM);
763 
764  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
765  s->raw_image_buffer_size);
766  if (ret < 0)
767  return ret;
768  }
769 
770  return 0;
771 }
772 
773 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
774 {
775  int code;
776  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
777  if (code < 0 || code > 16) {
778  av_log(s->avctx, AV_LOG_WARNING,
779  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
780  0, dc_index, &s->vlcs[0][dc_index]);
781  return 0xfffff;
782  }
783 
784  if (code)
785  return get_xbits(&s->gb, code);
786  else
787  return 0;
788 }
789 
790 /* decode block and dequantize */
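/* Baseline entropy decoding: each AC Huffman symbol packs a zero-run length
 * in the high nibble and the coefficient magnitude size in the low nibble;
 * decoded levels are dequantized and stored in zigzag (permuted) order. */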
791 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
792  int dc_index, int ac_index, uint16_t *quant_matrix)
793 {
794  int code, i, j, level, val;
795 
796  /* DC coef */
797  val = mjpeg_decode_dc(s, dc_index);
798  if (val == 0xfffff) {
799  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
800  return AVERROR_INVALIDDATA;
801  }
802  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
803  val = av_clip_int16(val);
804  s->last_dc[component] = val;
805  block[0] = val;
806  /* AC coefs */
807  i = 0;
808  {OPEN_READER(re, &s->gb);
809  do {
810  UPDATE_CACHE(re, &s->gb);
811  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
812 
813  i += ((unsigned)code) >> 4;
814  code &= 0xf;
815  if (code) {
816  if (code > MIN_CACHE_BITS - 16)
817  UPDATE_CACHE(re, &s->gb);
818 
819  {
820  int cache = GET_CACHE(re, &s->gb);
821  int sign = (~cache) >> 31;
822  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
823  }
824 
825  LAST_SKIP_BITS(re, &s->gb, code);
826 
827  if (i > 63) {
828  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
829  return AVERROR_INVALIDDATA;
830  }
831  j = s->scantable.permutated[i];
832  block[j] = level * quant_matrix[i];
833  }
834  } while (i < 63);
835  CLOSE_READER(re, &s->gb);}
836 
837  return 0;
838 }
839 
840 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
841  int component, int dc_index,
842  uint16_t *quant_matrix, int Al)
843 {
844  unsigned val;
845  s->bdsp.clear_block(block);
846  val = mjpeg_decode_dc(s, dc_index);
847  if (val == 0xfffff) {
848  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
849  return AVERROR_INVALIDDATA;
850  }
851  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
852  s->last_dc[component] = val;
853  block[0] = val;
854  return 0;
855 }
856 
857 /* decode block and dequantize - progressive JPEG version */
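/* Spectral-selection AC pass: only coefficients ss..se are coded in this
 * scan, scaled by 2^Al; an end-of-band run (EOBRUN) says how many further
 * blocks contain no nonzero coefficients in the band. */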
858 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
859  uint8_t *last_nnz, int ac_index,
860  uint16_t *quant_matrix,
861  int ss, int se, int Al, int *EOBRUN)
862 {
863  int code, i, j, val, run;
864  unsigned level;
865 
866  if (*EOBRUN) {
867  (*EOBRUN)--;
868  return 0;
869  }
870 
871  {
872  OPEN_READER(re, &s->gb);
873  for (i = ss; ; i++) {
874  UPDATE_CACHE(re, &s->gb);
875  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
876 
877  run = ((unsigned) code) >> 4;
878  code &= 0xF;
879  if (code) {
880  i += run;
881  if (code > MIN_CACHE_BITS - 16)
882  UPDATE_CACHE(re, &s->gb);
883 
884  {
885  int cache = GET_CACHE(re, &s->gb);
886  int sign = (~cache) >> 31;
887  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
888  }
889 
890  LAST_SKIP_BITS(re, &s->gb, code);
891 
892  if (i >= se) {
893  if (i == se) {
894  j = s->scantable.permutated[se];
895  block[j] = level * (quant_matrix[se] << Al);
896  break;
897  }
898  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
899  return AVERROR_INVALIDDATA;
900  }
901  j = s->scantable.permutated[i];
902  block[j] = level * (quant_matrix[i] << Al);
903  } else {
904  if (run == 0xF) {// ZRL - skip 15 coefficients
905  i += 15;
906  if (i >= se) {
907  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
908  return AVERROR_INVALIDDATA;
909  }
910  } else {
911  val = (1 << run);
912  if (run) {
913  UPDATE_CACHE(re, &s->gb);
914  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
915  LAST_SKIP_BITS(re, &s->gb, run);
916  }
917  *EOBRUN = val - 1;
918  break;
919  }
920  }
921  }
922  CLOSE_READER(re, &s->gb);
923  }
924 
925  if (i > *last_nnz)
926  *last_nnz = i;
927 
928  return 0;
929 }
930 
931 #define REFINE_BIT(j) { \
932  UPDATE_CACHE(re, &s->gb); \
933  sign = block[j] >> 15; \
934  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
935  ((quant_matrix[i] ^ sign) - sign) << Al; \
936  LAST_SKIP_BITS(re, &s->gb, 1); \
937 }
938 
939 #define ZERO_RUN \
940 for (; ; i++) { \
941  if (i > last) { \
942  i += run; \
943  if (i > se) { \
944  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
945  return -1; \
946  } \
947  break; \
948  } \
949  j = s->scantable.permutated[i]; \
950  if (block[j]) \
951  REFINE_BIT(j) \
952  else if (run-- == 0) \
953  break; \
954 }
955 
956 /* decode block and dequantize - progressive JPEG refinement pass */
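/* Successive-approximation refinement: already-nonzero coefficients receive
 * one correction bit each (applied as +/-(quant_matrix[i] << Al) towards
 * their sign), while newly coded coefficients are stored as
 * +/-(quant_matrix[i] << Al). */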
957 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
958  uint8_t *last_nnz,
959  int ac_index, uint16_t *quant_matrix,
960  int ss, int se, int Al, int *EOBRUN)
961 {
962  int code, i = ss, j, sign, val, run;
963  int last = FFMIN(se, *last_nnz);
964 
965  OPEN_READER(re, &s->gb);
966  if (*EOBRUN) {
967  (*EOBRUN)--;
968  } else {
969  for (; ; i++) {
970  UPDATE_CACHE(re, &s->gb);
971  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
972 
973  if (code & 0xF) {
974  run = ((unsigned) code) >> 4;
975  UPDATE_CACHE(re, &s->gb);
976  val = SHOW_UBITS(re, &s->gb, 1);
977  LAST_SKIP_BITS(re, &s->gb, 1);
978  ZERO_RUN;
979  j = s->scantable.permutated[i];
980  val--;
981  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
982  if (i == se) {
983  if (i > *last_nnz)
984  *last_nnz = i;
985  CLOSE_READER(re, &s->gb);
986  return 0;
987  }
988  } else {
989  run = ((unsigned) code) >> 4;
990  if (run == 0xF) {
991  ZERO_RUN;
992  } else {
993  val = run;
994  run = (1 << run);
995  if (val) {
996  UPDATE_CACHE(re, &s->gb);
997  run += SHOW_UBITS(re, &s->gb, val);
998  LAST_SKIP_BITS(re, &s->gb, val);
999  }
1000  *EOBRUN = run - 1;
1001  break;
1002  }
1003  }
1004  }
1005 
1006  if (i > *last_nnz)
1007  *last_nnz = i;
1008  }
1009 
1010  for (; i <= last; i++) {
1011  j = s->scantable.permutated[i];
1012  if (block[j])
1013  REFINE_BIT(j)
1014  }
1015  CLOSE_READER(re, &s->gb);
1016 
1017  return 0;
1018 }
1019 #undef REFINE_BIT
1020 #undef ZERO_RUN
1021 
1022 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1023 {
1024  int i;
1025  int reset = 0;
1026 
1027  if (s->restart_interval) {
1028  s->restart_count--;
1029  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1030  align_get_bits(&s->gb);
1031  for (i = 0; i < nb_components; i++) /* reset dc */
1032  s->last_dc[i] = (4 << s->bits);
1033  }
1034 
1035  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1036  /* skip RSTn */
1037  if (s->restart_count == 0) {
1038  if( show_bits(&s->gb, i) == (1 << i) - 1
1039  || show_bits(&s->gb, i) == 0xFF) {
1040  int pos = get_bits_count(&s->gb);
1041  align_get_bits(&s->gb);
1042  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1043  skip_bits(&s->gb, 8);
1044  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1045  for (i = 0; i < nb_components; i++) /* reset dc */
1046  s->last_dc[i] = (4 << s->bits);
1047  reset = 1;
1048  } else
1049  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1050  }
1051  }
1052  }
1053  return reset;
1054 }
1055 
1056 /* Handles 1 to 4 components */
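/* Lossless JPEG scan over packed RGB/CMYK/Bayer lines: every sample is
 * predicted from its left/top/top-left neighbours via PREDICT() and the
 * Huffman-coded difference, shifted by the point transform, is added back. */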
1057 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1058 {
1059  int i, mb_x, mb_y;
1060  unsigned width;
1061  uint16_t (*buffer)[4];
1062  int left[4], top[4], topleft[4];
1063  const int linesize = s->linesize[0];
1064  const int mask = ((1 << s->bits) - 1) << point_transform;
1065  int resync_mb_y = 0;
1066  int resync_mb_x = 0;
1067  int vpred[6];
1068 
1069  if (!s->bayer && s->nb_components < 3)
1070  return AVERROR_INVALIDDATA;
1071  if (s->bayer && s->nb_components > 2)
1072  return AVERROR_INVALIDDATA;
1073  if (s->nb_components <= 0 || s->nb_components > 4)
1074  return AVERROR_INVALIDDATA;
1075  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1076  return AVERROR_INVALIDDATA;
1077 
1078 
1079  s->restart_count = s->restart_interval;
1080 
1081  if (s->restart_interval == 0)
1082  s->restart_interval = INT_MAX;
1083 
1084  if (s->bayer)
1085  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1086  else
1087  width = s->mb_width;
1088 
1089  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1090  if (!s->ljpeg_buffer)
1091  return AVERROR(ENOMEM);
1092 
1093  buffer = s->ljpeg_buffer;
1094 
1095  for (i = 0; i < 4; i++)
1096  buffer[0][i] = 1 << (s->bits - 1);
1097 
1098  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1099  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1100 
1101  if (s->interlaced && s->bottom_field)
1102  ptr += linesize >> 1;
1103 
1104  for (i = 0; i < 4; i++)
1105  top[i] = left[i] = topleft[i] = buffer[0][i];
1106 
1107  if ((mb_y * s->width) % s->restart_interval == 0) {
1108  for (i = 0; i < 6; i++)
1109  vpred[i] = 1 << (s->bits-1);
1110  }
1111 
1112  for (mb_x = 0; mb_x < width; mb_x++) {
1113  int modified_predictor = predictor;
1114 
1115  if (get_bits_left(&s->gb) < 1) {
1116  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1117  return AVERROR_INVALIDDATA;
1118  }
1119 
1120  if (s->restart_interval && !s->restart_count){
1121  s->restart_count = s->restart_interval;
1122  resync_mb_x = mb_x;
1123  resync_mb_y = mb_y;
1124  for(i=0; i<4; i++)
1125  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1126  }
1127  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1128  modified_predictor = 1;
1129 
1130  for (i=0;i<nb_components;i++) {
1131  int pred, dc;
1132 
1133  topleft[i] = top[i];
1134  top[i] = buffer[mb_x][i];
1135 
1136  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1137  if(dc == 0xFFFFF)
1138  return -1;
1139 
1140  if (!s->bayer || mb_x) {
1141  pred = left[i];
1142  } else { /* This path runs only for the first line in bayer images */
1143  vpred[i] += dc;
1144  pred = vpred[i] - dc;
1145  }
1146 
1147  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1148 
1149  left[i] = buffer[mb_x][i] =
1150  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1151  }
1152 
1153  if (s->restart_interval && !--s->restart_count) {
1154  align_get_bits(&s->gb);
1155  skip_bits(&s->gb, 16); /* skip RSTn */
1156  }
1157  }
1158  if (s->rct && s->nb_components == 4) {
1159  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1160  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1161  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1162  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1163  ptr[4*mb_x + 0] = buffer[mb_x][3];
1164  }
1165  } else if (s->nb_components == 4) {
1166  for(i=0; i<nb_components; i++) {
1167  int c= s->comp_index[i];
1168  if (s->bits <= 8) {
1169  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1170  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1171  }
1172  } else if(s->bits == 9) {
1173  return AVERROR_PATCHWELCOME;
1174  } else {
1175  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1176  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1177  }
1178  }
1179  }
1180  } else if (s->rct) {
1181  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1182  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1183  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1184  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1185  }
1186  } else if (s->pegasus_rct) {
1187  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1188  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1189  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1190  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1191  }
1192  } else if (s->bayer) {
1193  if (nb_components == 1) {
1194  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1195  for (mb_x = 0; mb_x < width; mb_x++)
1196  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1197  } else if (nb_components == 2) {
1198  for (mb_x = 0; mb_x < width; mb_x++) {
1199  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1200  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1201  }
1202  }
1203  } else {
1204  for(i=0; i<nb_components; i++) {
1205  int c= s->comp_index[i];
1206  if (s->bits <= 8) {
1207  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1208  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1209  }
1210  } else if(s->bits == 9) {
1211  return AVERROR_PATCHWELCOME;
1212  } else {
1213  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1214  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1215  }
1216  }
1217  }
1218  }
1219  }
1220  return 0;
1221 }
1222 
1223 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1224  int point_transform, int nb_components)
1225 {
1226  int i, mb_x, mb_y, mask;
1227  int bits= (s->bits+7)&~7;
1228  int resync_mb_y = 0;
1229  int resync_mb_x = 0;
1230 
1231  point_transform += bits - s->bits;
1232  mask = ((1 << s->bits) - 1) << point_transform;
1233 
1234  av_assert0(nb_components>=1 && nb_components<=4);
1235 
1236  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1237  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1238  if (get_bits_left(&s->gb) < 1) {
1239  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1240  return AVERROR_INVALIDDATA;
1241  }
1242  if (s->restart_interval && !s->restart_count){
1243  s->restart_count = s->restart_interval;
1244  resync_mb_x = mb_x;
1245  resync_mb_y = mb_y;
1246  }
1247 
1248  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1249  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1250  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1251  for (i = 0; i < nb_components; i++) {
1252  uint8_t *ptr;
1253  uint16_t *ptr16;
1254  int n, h, v, x, y, c, j, linesize;
1255  n = s->nb_blocks[i];
1256  c = s->comp_index[i];
1257  h = s->h_scount[i];
1258  v = s->v_scount[i];
1259  x = 0;
1260  y = 0;
1261  linesize= s->linesize[c];
1262 
1263  if(bits>8) linesize /= 2;
1264 
1265  for(j=0; j<n; j++) {
1266  int pred, dc;
1267 
1268  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1269  if(dc == 0xFFFFF)
1270  return -1;
1271  if ( h * mb_x + x >= s->width
1272  || v * mb_y + y >= s->height) {
1273  // Nothing to do
1274  } else if (bits<=8) {
1275  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1276  if(y==0 && toprow){
1277  if(x==0 && leftcol){
1278  pred= 1 << (bits - 1);
1279  }else{
1280  pred= ptr[-1];
1281  }
1282  }else{
1283  if(x==0 && leftcol){
1284  pred= ptr[-linesize];
1285  }else{
1286  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1287  }
1288  }
1289 
1290  if (s->interlaced && s->bottom_field)
1291  ptr += linesize >> 1;
1292  pred &= mask;
1293  *ptr= pred + ((unsigned)dc << point_transform);
1294  }else{
1295  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1296  if(y==0 && toprow){
1297  if(x==0 && leftcol){
1298  pred= 1 << (bits - 1);
1299  }else{
1300  pred= ptr16[-1];
1301  }
1302  }else{
1303  if(x==0 && leftcol){
1304  pred= ptr16[-linesize];
1305  }else{
1306  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1307  }
1308  }
1309 
1310  if (s->interlaced && s->bottom_field)
1311  ptr16 += linesize >> 1;
1312  pred &= mask;
1313  *ptr16= pred + ((unsigned)dc << point_transform);
1314  }
1315  if (++x == h) {
1316  x = 0;
1317  y++;
1318  }
1319  }
1320  }
1321  } else {
1322  for (i = 0; i < nb_components; i++) {
1323  uint8_t *ptr;
1324  uint16_t *ptr16;
1325  int n, h, v, x, y, c, j, linesize, dc;
1326  n = s->nb_blocks[i];
1327  c = s->comp_index[i];
1328  h = s->h_scount[i];
1329  v = s->v_scount[i];
1330  x = 0;
1331  y = 0;
1332  linesize = s->linesize[c];
1333 
1334  if(bits>8) linesize /= 2;
1335 
1336  for (j = 0; j < n; j++) {
1337  int pred;
1338 
1339  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1340  if(dc == 0xFFFFF)
1341  return -1;
1342  if ( h * mb_x + x >= s->width
1343  || v * mb_y + y >= s->height) {
1344  // Nothing to do
1345  } else if (bits<=8) {
1346  ptr = s->picture_ptr->data[c] +
1347  (linesize * (v * mb_y + y)) +
1348  (h * mb_x + x); //FIXME optimize this crap
1349  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1350 
1351  pred &= mask;
1352  *ptr = pred + ((unsigned)dc << point_transform);
1353  }else{
1354  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1355  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1356 
1357  pred &= mask;
1358  *ptr16= pred + ((unsigned)dc << point_transform);
1359  }
1360 
1361  if (++x == h) {
1362  x = 0;
1363  y++;
1364  }
1365  }
1366  }
1367  }
1368  if (s->restart_interval && !--s->restart_count) {
1369  align_get_bits(&s->gb);
1370  skip_bits(&s->gb, 16); /* skip RSTn */
1371  }
1372  }
1373  }
1374  return 0;
1375 }
1376 
1377 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1378  uint8_t *dst, const uint8_t *src,
1379  int linesize, int lowres)
1380 {
1381  switch (lowres) {
1382  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1383  break;
1384  case 1: copy_block4(dst, src, linesize, linesize, 4);
1385  break;
1386  case 2: copy_block2(dst, src, linesize, linesize, 2);
1387  break;
1388  case 3: *dst = *src;
1389  break;
1390  }
1391 }
1392 
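/* When the stored bit depth is not a multiple of 8, shift the IDCT output up
 * so samples are MSB-aligned within the 8- or 16-bit frame buffer. */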
1393 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1394 {
1395  int block_x, block_y;
1396  int size = 8 >> s->avctx->lowres;
1397  if (s->bits > 8) {
1398  for (block_y=0; block_y<size; block_y++)
1399  for (block_x=0; block_x<size; block_x++)
1400  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1401  } else {
1402  for (block_y=0; block_y<size; block_y++)
1403  for (block_x=0; block_x<size; block_x++)
1404  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1405  }
1406 }
1407 
1408 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1409  int Al, const uint8_t *mb_bitmask,
1410  int mb_bitmask_size,
1411  const AVFrame *reference)
1412 {
1413  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1414  uint8_t *data[MAX_COMPONENTS];
1415  const uint8_t *reference_data[MAX_COMPONENTS];
1416  int linesize[MAX_COMPONENTS];
1417  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1418  int bytes_per_pixel = 1 + (s->bits > 8);
1419 
1420  if (mb_bitmask) {
1421  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1422  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1423  return AVERROR_INVALIDDATA;
1424  }
1425  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1426  }
1427 
1428  s->restart_count = 0;
1429 
1430  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1431  &chroma_v_shift);
1432  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1433  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1434 
1435  for (i = 0; i < nb_components; i++) {
1436  int c = s->comp_index[i];
1437  data[c] = s->picture_ptr->data[c];
1438  reference_data[c] = reference ? reference->data[c] : NULL;
1439  linesize[c] = s->linesize[c];
1440  s->coefs_finished[c] |= 1;
1441  }
1442 
1443  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1444  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1445  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1446 
1447  if (s->restart_interval && !s->restart_count)
1448  s->restart_count = s->restart_interval;
1449 
1450  if (get_bits_left(&s->gb) < 0) {
1451  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1452  -get_bits_left(&s->gb));
1453  return AVERROR_INVALIDDATA;
1454  }
1455  for (i = 0; i < nb_components; i++) {
1456  uint8_t *ptr;
1457  int n, h, v, x, y, c, j;
1458  int block_offset;
1459  n = s->nb_blocks[i];
1460  c = s->comp_index[i];
1461  h = s->h_scount[i];
1462  v = s->v_scount[i];
1463  x = 0;
1464  y = 0;
1465  for (j = 0; j < n; j++) {
1466  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1467  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1468 
1469  if (s->interlaced && s->bottom_field)
1470  block_offset += linesize[c] >> 1;
1471  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1472  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1473  ptr = data[c] + block_offset;
1474  } else
1475  ptr = NULL;
1476  if (!s->progressive) {
1477  if (copy_mb) {
1478  if (ptr)
1479  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1480  linesize[c], s->avctx->lowres);
1481 
1482  } else {
1483  s->bdsp.clear_block(s->block);
1484  if (decode_block(s, s->block, i,
1485  s->dc_index[i], s->ac_index[i],
1486  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1487  av_log(s->avctx, AV_LOG_ERROR,
1488  "error y=%d x=%d\n", mb_y, mb_x);
1489  return AVERROR_INVALIDDATA;
1490  }
1491  if (ptr) {
1492  s->idsp.idct_put(ptr, linesize[c], s->block);
1493  if (s->bits & 7)
1494  shift_output(s, ptr, linesize[c]);
1495  }
1496  }
1497  } else {
1498  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1499  (h * mb_x + x);
1500  int16_t *block = s->blocks[c][block_idx];
1501  if (Ah)
1502  block[0] += get_bits1(&s->gb) *
1503  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1504  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1505  s->quant_matrixes[s->quant_sindex[i]],
1506  Al) < 0) {
1507  av_log(s->avctx, AV_LOG_ERROR,
1508  "error y=%d x=%d\n", mb_y, mb_x);
1509  return AVERROR_INVALIDDATA;
1510  }
1511  }
1512  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1513  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1514  mb_x, mb_y, x, y, c, s->bottom_field,
1515  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1516  if (++x == h) {
1517  x = 0;
1518  y++;
1519  }
1520  }
1521  }
1522 
1523  handle_rstn(s, nb_components);
1524  }
1525  }
1526  return 0;
1527 }
1528 
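/* One progressive AC scan: refines or fills the ss..se band of every block of
 * one component and marks it in coefs_finished; the final IDCT is performed
 * only once all scans have been read. */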
1529 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1530  int se, int Ah, int Al)
1531 {
1532  int mb_x, mb_y;
1533  int EOBRUN = 0;
1534  int c = s->comp_index[0];
1535  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1536 
1537  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1538  if (se < ss || se > 63) {
1539  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1540  return AVERROR_INVALIDDATA;
1541  }
1542 
1543  // s->coefs_finished is a bitmask for coefficients coded
1544  // ss and se are parameters telling start and end coefficients
1545  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1546 
1547  s->restart_count = 0;
1548 
1549  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1550  int block_idx = mb_y * s->block_stride[c];
1551  int16_t (*block)[64] = &s->blocks[c][block_idx];
1552  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1553  if (get_bits_left(&s->gb) <= 0) {
1554  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1555  return AVERROR_INVALIDDATA;
1556  }
1557  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1558  int ret;
1559  if (s->restart_interval && !s->restart_count)
1560  s->restart_count = s->restart_interval;
1561 
1562  if (Ah)
1563  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1564  quant_matrix, ss, se, Al, &EOBRUN);
1565  else
1566  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1567  quant_matrix, ss, se, Al, &EOBRUN);
1568  if (ret < 0) {
1569  av_log(s->avctx, AV_LOG_ERROR,
1570  "error y=%d x=%d\n", mb_y, mb_x);
1571  return AVERROR_INVALIDDATA;
1572  }
1573 
1574  if (handle_rstn(s, 0))
1575  EOBRUN = 0;
1576  }
1577  }
1578  return 0;
1579 }
1580 
1581 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1582 {
1583  int mb_x, mb_y;
1584  int c;
1585  const int bytes_per_pixel = 1 + (s->bits > 8);
1586  const int block_size = s->lossless ? 1 : 8;
1587 
1588  for (c = 0; c < s->nb_components; c++) {
1589  uint8_t *data = s->picture_ptr->data[c];
1590  int linesize = s->linesize[c];
1591  int h = s->h_max / s->h_count[c];
1592  int v = s->v_max / s->v_count[c];
1593  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1594  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1595 
1596  if (~s->coefs_finished[c])
1597  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1598 
1599  if (s->interlaced && s->bottom_field)
1600  data += linesize >> 1;
1601 
1602  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1603  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1604  int block_idx = mb_y * s->block_stride[c];
1605  int16_t (*block)[64] = &s->blocks[c][block_idx];
1606  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1607  s->idsp.idct_put(ptr, linesize, *block);
1608  if (s->bits & 7)
1609  shift_output(s, ptr, linesize);
1610  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1611  }
1612  }
1613  }
1614 }
1615 
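/* Start of scan: reads component selectors and Huffman table indices plus the
 * Ss/Se/Ah/Al fields (predictor and point transform for lossless), then hands
 * off to the hwaccel, JPEG-LS, lossless, progressive or baseline scan code. */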
1616 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1617  int mb_bitmask_size, const AVFrame *reference)
1618 {
1619  int len, nb_components, i, h, v, predictor, point_transform;
1620  int index, id, ret;
1621  const int block_size = s->lossless ? 1 : 8;
1622  int ilv, prev_shift;
1623 
1624  if (!s->got_picture) {
1625  av_log(s->avctx, AV_LOG_WARNING,
1626  "Can not process SOS before SOF, skipping\n");
1627  return -1;
1628  }
1629 
1630  if (reference) {
1631  if (reference->width != s->picture_ptr->width ||
1632  reference->height != s->picture_ptr->height ||
1633  reference->format != s->picture_ptr->format) {
1634  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1635  return AVERROR_INVALIDDATA;
1636  }
1637  }
1638 
1639  /* XXX: verify len field validity */
1640  len = get_bits(&s->gb, 16);
1641  nb_components = get_bits(&s->gb, 8);
1642  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1643  avpriv_report_missing_feature(s->avctx,
1644  "decode_sos: nb_components (%d)",
1645  nb_components);
1646  return AVERROR_PATCHWELCOME;
1647  }
1648  if (len != 6 + 2 * nb_components) {
1649  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1650  return AVERROR_INVALIDDATA;
1651  }
1652  for (i = 0; i < nb_components; i++) {
1653  id = get_bits(&s->gb, 8) - 1;
1654  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1655  /* find component index */
1656  for (index = 0; index < s->nb_components; index++)
1657  if (id == s->component_id[index])
1658  break;
1659  if (index == s->nb_components) {
1660  av_log(s->avctx, AV_LOG_ERROR,
1661  "decode_sos: index(%d) out of components\n", index);
1662  return AVERROR_INVALIDDATA;
1663  }
1664  /* Metasoft MJPEG codec has Cb and Cr swapped */
1665  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1666  && nb_components == 3 && s->nb_components == 3 && i)
1667  index = 3 - i;
1668 
1669  s->quant_sindex[i] = s->quant_index[index];
1670  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1671  s->h_scount[i] = s->h_count[index];
1672  s->v_scount[i] = s->v_count[index];
1673 
1674  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1675  index = (index+2)%3;
1676 
1677  s->comp_index[i] = index;
1678 
1679  s->dc_index[i] = get_bits(&s->gb, 4);
1680  s->ac_index[i] = get_bits(&s->gb, 4);
1681 
1682  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1683  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1684  goto out_of_range;
1685  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1686  goto out_of_range;
1687  }
1688 
1689  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1690  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1691  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1692  prev_shift = get_bits(&s->gb, 4); /* Ah */
1693  point_transform = get_bits(&s->gb, 4); /* Al */
1694  }else
1695  prev_shift = point_transform = 0;
1696 
1697  if (nb_components > 1) {
1698  /* interleaved stream */
1699  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1700  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1701  } else if (!s->ls) { /* skip this for JPEG-LS */
1702  h = s->h_max / s->h_scount[0];
1703  v = s->v_max / s->v_scount[0];
1704  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1705  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1706  s->nb_blocks[0] = 1;
1707  s->h_scount[0] = 1;
1708  s->v_scount[0] = 1;
1709  }
1710 
1711  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1712  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1713  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1714  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1715  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1716 
1717 
1718  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1719  for (i = s->mjpb_skiptosod; i > 0; i--)
1720  skip_bits(&s->gb, 8);
1721 
1722 next_field:
1723  for (i = 0; i < nb_components; i++)
1724  s->last_dc[i] = (4 << s->bits);
1725 
1726  if (s->avctx->hwaccel) {
1727  int bytes_to_start = get_bits_count(&s->gb) / 8;
1728  av_assert0(bytes_to_start >= 0 &&
1729  s->raw_scan_buffer_size >= bytes_to_start);
1730 
1731  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1732  s->raw_scan_buffer + bytes_to_start,
1733  s->raw_scan_buffer_size - bytes_to_start);
1734  if (ret < 0)
1735  return ret;
1736 
1737  } else if (s->lossless) {
1738  av_assert0(s->picture_ptr == s->picture);
1739  if (CONFIG_JPEGLS_DECODER && s->ls) {
1740 // for () {
1741 // reset_ls_coding_parameters(s, 0);
1742 
1743  if ((ret = ff_jpegls_decode_picture(s, predictor,
1744  point_transform, ilv)) < 0)
1745  return ret;
1746  } else {
1747  if (s->rgb || s->bayer) {
1748  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1749  return ret;
1750  } else {
1751  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1752  point_transform,
1753  nb_components)) < 0)
1754  return ret;
1755  }
1756  }
1757  } else {
1758  if (s->progressive && predictor) {
1759  av_assert0(s->picture_ptr == s->picture);
1760  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1761  ilv, prev_shift,
1762  point_transform)) < 0)
1763  return ret;
1764  } else {
1765  if ((ret = mjpeg_decode_scan(s, nb_components,
1766  prev_shift, point_transform,
1767  mb_bitmask, mb_bitmask_size, reference)) < 0)
1768  return ret;
1769  }
1770  }
1771 
1772  if (s->interlaced &&
1773  get_bits_left(&s->gb) > 32 &&
1774  show_bits(&s->gb, 8) == 0xFF) {
1775  GetBitContext bak = s->gb;
1776  align_get_bits(&bak);
1777  if (show_bits(&bak, 16) == 0xFFD1) {
1778  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1779  s->gb = bak;
1780  skip_bits(&s->gb, 16);
1781  s->bottom_field ^= 1;
1782 
1783  goto next_field;
1784  }
1785  }
1786 
1787  emms_c();
1788  return 0;
1789  out_of_range:
1790  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1791  return AVERROR_INVALIDDATA;
1792 }
1793 
1794 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1795 {
1796  if (get_bits(&s->gb, 16) != 4)
1797  return AVERROR_INVALIDDATA;
1798  s->restart_interval = get_bits(&s->gb, 16);
1799  s->restart_count = 0;
1800  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1801  s->restart_interval);
1802 
1803  return 0;
1804 }
1805 
1806 static int mjpeg_decode_app(MJpegDecodeContext *s)
1807 {
1808  int len, id, i;
1809 
1810  len = get_bits(&s->gb, 16);
1811  if (len < 6) {
1812  if (s->bayer) {
1813  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1814  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1815  skip_bits(&s->gb, len);
1816  return 0;
1817  } else
1818  return AVERROR_INVALIDDATA;
1819  }
1820  if (8 * len > get_bits_left(&s->gb))
1821  return AVERROR_INVALIDDATA;
1822 
1823  id = get_bits_long(&s->gb, 32);
1824  len -= 6;
1825 
1826  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1827  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1828  av_fourcc2str(av_bswap32(id)), id, len);
1829 
1830  /* Buggy AVID, it puts EOI only at every 10th frame. */
1831  /* Also, this fourcc is used by non-avid files too, it holds some
1832  information, but it's always present in AVID-created files. */
1833  if (id == AV_RB32("AVI1")) {
1834  /* structure:
1835  4bytes AVI1
1836  1bytes polarity
1837  1bytes always zero
1838  4bytes field_size
1839  4bytes field_size_less_padding
1840  */
1841  s->buggy_avid = 1;
1842  i = get_bits(&s->gb, 8); len--;
1843  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1844  goto out;
1845  }
1846 
1847  if (id == AV_RB32("JFIF")) {
1848  int t_w, t_h, v1, v2;
1849  if (len < 8)
1850  goto out;
1851  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1852  v1 = get_bits(&s->gb, 8);
1853  v2 = get_bits(&s->gb, 8);
1854  skip_bits(&s->gb, 8);
1855 
1856  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1857  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1858  if ( s->avctx->sample_aspect_ratio.num <= 0
1859  || s->avctx->sample_aspect_ratio.den <= 0) {
1860  s->avctx->sample_aspect_ratio.num = 0;
1861  s->avctx->sample_aspect_ratio.den = 1;
1862  }
1863 
1864  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1865  av_log(s->avctx, AV_LOG_INFO,
1866  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1867  v1, v2,
1868  s->avctx->sample_aspect_ratio.num,
1869  s->avctx->sample_aspect_ratio.den);
1870 
1871  len -= 8;
1872  if (len >= 2) {
1873  t_w = get_bits(&s->gb, 8);
1874  t_h = get_bits(&s->gb, 8);
1875  if (t_w && t_h) {
1876  /* skip thumbnail */
1877  if (len -10 - (t_w * t_h * 3) > 0)
1878  len -= t_w * t_h * 3;
1879  }
1880  len -= 2;
1881  }
1882  goto out;
1883  }
1884 
1885  if ( id == AV_RB32("Adob")
1886  && len >= 7
1887  && show_bits(&s->gb, 8) == 'e'
1888  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1889  skip_bits(&s->gb, 8); /* 'e' */
1890  skip_bits(&s->gb, 16); /* version */
1891  skip_bits(&s->gb, 16); /* flags0 */
1892  skip_bits(&s->gb, 16); /* flags1 */
1893  s->adobe_transform = get_bits(&s->gb, 8);
1894  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1895  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1896  len -= 7;
1897  goto out;
1898  }
1899 
1900  if (id == AV_RB32("LJIF")) {
1901  int rgb = s->rgb;
1902  int pegasus_rct = s->pegasus_rct;
1903  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1904  av_log(s->avctx, AV_LOG_INFO,
1905  "Pegasus lossless jpeg header found\n");
1906  skip_bits(&s->gb, 16); /* version ? */
1907  skip_bits(&s->gb, 16); /* unknown always 0? */
1908  skip_bits(&s->gb, 16); /* unknown always 0? */
1909  skip_bits(&s->gb, 16); /* unknown always 0? */
1910  switch (i=get_bits(&s->gb, 8)) {
1911  case 1:
1912  rgb = 1;
1913  pegasus_rct = 0;
1914  break;
1915  case 2:
1916  rgb = 1;
1917  pegasus_rct = 1;
1918  break;
1919  default:
1920  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1921  }
1922 
1923  len -= 9;
1924  if (s->got_picture)
1925  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1926  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1927  goto out;
1928  }
1929 
1930  s->rgb = rgb;
1931  s->pegasus_rct = pegasus_rct;
1932 
1933  goto out;
1934  }
1935  if (id == AV_RL32("colr") && len > 0) {
1936  s->colr = get_bits(&s->gb, 8);
1937  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1938  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1939  len --;
1940  goto out;
1941  }
1942  if (id == AV_RL32("xfrm") && len > 0) {
1943  s->xfrm = get_bits(&s->gb, 8);
1944  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1945  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1946  len --;
1947  goto out;
1948  }
1949 
1950  /* JPS extension by VRex */
1951  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1952  int flags, layout, type;
1953  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1954  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1955 
1956  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1957  skip_bits(&s->gb, 16); len -= 2; /* block length */
1958  skip_bits(&s->gb, 8); /* reserved */
1959  flags = get_bits(&s->gb, 8);
1960  layout = get_bits(&s->gb, 8);
1961  type = get_bits(&s->gb, 8);
1962  len -= 4;
1963 
1964  av_freep(&s->stereo3d);
1965  s->stereo3d = av_stereo3d_alloc();
1966  if (!s->stereo3d) {
1967  goto out;
1968  }
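  /* _JPS descriptor: type 0 is a plain 2D image, type 1 is stereoscopic with
   * the layout byte selecting line-interleaved, side-by-side or over-under
   * packing; a cleared 0x04 flag marks the views as swapped (inverted). */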
 1969  if (type == 0) {
 1970  s->stereo3d->type = AV_STEREO3D_2D;
 1971  } else if (type == 1) {
 1972  switch (layout) {
 1973  case 0x01:
 1974  s->stereo3d->type = AV_STEREO3D_LINES;
 1975  break;
 1976  case 0x02:
 1977  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
 1978  break;
 1979  case 0x03:
 1980  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
 1981  break;
 1982  }
 1983  if (!(flags & 0x04)) {
 1984  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
 1985  }
1986  }
1987  goto out;
1988  }
1989 
1990  /* EXIF metadata */
1991  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
1992  GetByteContext gbytes;
1993  int ret, le, ifd_offset, bytes_read;
1994  const uint8_t *aligned;
1995 
1996  skip_bits(&s->gb, 16); // skip padding
1997  len -= 2;
1998 
1999  // init byte wise reading
2000  aligned = align_get_bits(&s->gb);
2001  bytestream2_init(&gbytes, aligned, len);
2002 
2003  // read TIFF header
2004  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2005  if (ret) {
2006  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2007  } else {
2008  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2009 
2010  // read 0th IFD and store the metadata
2011  // (return values > 0 indicate the presence of subimage metadata)
2012  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2013  if (ret < 0) {
2014  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2015  }
2016  }
2017 
2018  bytes_read = bytestream2_tell(&gbytes);
2019  skip_bits(&s->gb, bytes_read << 3);
2020  len -= bytes_read;
2021 
2022  goto out;
2023  }
2024 
2025  /* Apple MJPEG-A */
2026  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2027  id = get_bits_long(&s->gb, 32);
2028  len -= 4;
2029  /* Apple MJPEG-A */
2030  if (id == AV_RB32("mjpg")) {
2031  /* structure:
2032  4bytes field size
2033  4bytes pad field size
2034  4bytes next off
2035  4bytes quant off
2036  4bytes huff off
2037  4bytes image off
2038  4bytes scan off
2039  4bytes data off
2040  */
2041  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2042  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2043  }
2044  }
2045 
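  /* APP2 "ICC_PROFILE": a profile may be split over several APP2 markers,
   * each carrying a 1-based sequence number and the total marker count; the
   * chunks are collected here and concatenated into frame side data once
   * every part has been seen. */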
2046  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2047  int id2;
2048  unsigned seqno;
2049  unsigned nummarkers;
2050 
2051  id = get_bits_long(&s->gb, 32);
2052  id2 = get_bits_long(&s->gb, 24);
2053  len -= 7;
2054  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2055  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2056  goto out;
2057  }
2058 
2059  skip_bits(&s->gb, 8);
2060  seqno = get_bits(&s->gb, 8);
2061  len -= 2;
2062  if (seqno == 0) {
2063  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2064  goto out;
2065  }
2066 
2067  nummarkers = get_bits(&s->gb, 8);
2068  len -= 1;
2069  if (nummarkers == 0) {
2070  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2071  goto out;
2072  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
 2073  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2074  goto out;
2075  } else if (seqno > nummarkers) {
2076  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2077  goto out;
2078  }
2079 
2080  /* Allocate if this is the first APP2 we've seen. */
2081  if (s->iccnum == 0) {
2082  s->iccdata = av_mallocz(nummarkers * sizeof(*(s->iccdata)));
2083  s->iccdatalens = av_mallocz(nummarkers * sizeof(*(s->iccdatalens)));
2084  if (!s->iccdata || !s->iccdatalens) {
2085  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2086  return AVERROR(ENOMEM);
2087  }
2088  s->iccnum = nummarkers;
2089  }
2090 
2091  if (s->iccdata[seqno - 1]) {
2092  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2093  goto out;
2094  }
2095 
2096  s->iccdatalens[seqno - 1] = len;
2097  s->iccdata[seqno - 1] = av_malloc(len);
2098  if (!s->iccdata[seqno - 1]) {
2099  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2100  return AVERROR(ENOMEM);
2101  }
2102 
2103  memcpy(s->iccdata[seqno - 1], align_get_bits(&s->gb), len);
2104  skip_bits(&s->gb, len << 3);
2105  len = 0;
2106  s->iccread++;
2107 
2108  if (s->iccread > s->iccnum)
2109  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2110  }
2111 
2112 out:
2113  /* slow but needed for extreme adobe jpegs */
 2114  if (len < 0)
 2115  av_log(s->avctx, AV_LOG_ERROR,
 2116  "mjpeg: error, decode_app parser read over the end\n");
2117  while (--len > 0)
2118  skip_bits(&s->gb, 8);
2119 
2120  return 0;
2121 }
 2122 
 2123 static int mjpeg_decode_com(MJpegDecodeContext *s)
 2124 {
2125  int len = get_bits(&s->gb, 16);
2126  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2127  int i;
2128  char *cbuf = av_malloc(len - 1);
2129  if (!cbuf)
2130  return AVERROR(ENOMEM);
2131 
2132  for (i = 0; i < len - 2; i++)
2133  cbuf[i] = get_bits(&s->gb, 8);
2134  if (i > 0 && cbuf[i - 1] == '\n')
2135  cbuf[i - 1] = 0;
2136  else
2137  cbuf[i] = 0;
2138 
2139  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2140  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2141 
2142  /* buggy avid, it puts EOI only at every 10th frame */
2143  if (!strncmp(cbuf, "AVID", 4)) {
2144  parse_avid(s, cbuf, len);
2145  } else if (!strcmp(cbuf, "CS=ITU601"))
2146  s->cs_itu601 = 1;
2147  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2148  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2149  s->flipped = 1;
2150  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2151  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2152  s->multiscope = 2;
2153  }
2154 
2155  av_free(cbuf);
2156  }
2157 
2158  return 0;
2159 }
2160 
2161 /* return the 8 bit start code value and update the search
2162  state. Return -1 if no start code found */
2163 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2164 {
2165  const uint8_t *buf_ptr;
2166  unsigned int v, v2;
2167  int val;
2168  int skipped = 0;
2169 
2170  buf_ptr = *pbuf_ptr;
2171  while (buf_end - buf_ptr > 1) {
2172  v = *buf_ptr++;
2173  v2 = *buf_ptr;
2174  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2175  val = *buf_ptr++;
2176  goto found;
2177  }
2178  skipped++;
2179  }
2180  buf_ptr = buf_end;
2181  val = -1;
2182 found:
2183  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2184  *pbuf_ptr = buf_ptr;
2185  return val;
2186 }
 2187 
 2188 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
 2189  const uint8_t **buf_ptr, const uint8_t *buf_end,
2190  const uint8_t **unescaped_buf_ptr,
2191  int *unescaped_buf_size)
2192 {
2193  int start_code;
2194  start_code = find_marker(buf_ptr, buf_end);
2195 
2196  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2197  if (!s->buffer)
2198  return AVERROR(ENOMEM);
2199 
2200  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2201  if (start_code == SOS && !s->ls) {
2202  const uint8_t *src = *buf_ptr;
2203  const uint8_t *ptr = src;
2204  uint8_t *dst = s->buffer;
2205 
2206  #define copy_data_segment(skip) do { \
2207  ptrdiff_t length = (ptr - src) - (skip); \
2208  if (length > 0) { \
2209  memcpy(dst, src, length); \
2210  dst += length; \
2211  src = ptr; \
2212  } \
2213  } while (0)
2214 
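  /* Remove JPEG byte stuffing: in entropy-coded data every 0xFF is followed
   * by a 0x00 stuffing byte, which is dropped here; runs of 0xFF fill bytes
   * are collapsed, RST0..RST7 restart markers are kept, and any other marker
   * terminates the scan data. */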
2215  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2216  ptr = buf_end;
2217  copy_data_segment(0);
2218  } else {
2219  while (ptr < buf_end) {
2220  uint8_t x = *(ptr++);
2221 
2222  if (x == 0xff) {
2223  ptrdiff_t skip = 0;
2224  while (ptr < buf_end && x == 0xff) {
2225  x = *(ptr++);
2226  skip++;
2227  }
2228 
2229  /* 0xFF, 0xFF, ... */
2230  if (skip > 1) {
2231  copy_data_segment(skip);
2232 
2233  /* decrement src as it is equal to ptr after the
2234  * copy_data_segment macro and we might want to
2235  * copy the current value of x later on */
2236  src--;
2237  }
2238 
2239  if (x < RST0 || x > RST7) {
2240  copy_data_segment(1);
2241  if (x)
2242  break;
2243  }
2244  }
2245  }
2246  if (src < ptr)
2247  copy_data_segment(0);
2248  }
2249  #undef copy_data_segment
2250 
2251  *unescaped_buf_ptr = s->buffer;
2252  *unescaped_buf_size = dst - s->buffer;
 2253  memset(s->buffer + *unescaped_buf_size, 0,
 2254  AV_INPUT_BUFFER_PADDING_SIZE);
 2255 
2256  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2257  (buf_end - *buf_ptr) - (dst - s->buffer));
2258  } else if (start_code == SOS && s->ls) {
2259  const uint8_t *src = *buf_ptr;
2260  uint8_t *dst = s->buffer;
2261  int bit_count = 0;
2262  int t = 0, b = 0;
2263  PutBitContext pb;
2264 
2265  /* find marker */
2266  while (src + t < buf_end) {
2267  uint8_t x = src[t++];
2268  if (x == 0xff) {
2269  while ((src + t < buf_end) && x == 0xff)
2270  x = src[t++];
2271  if (x & 0x80) {
2272  t -= FFMIN(2, t);
2273  break;
2274  }
2275  }
2276  }
2277  bit_count = t * 8;
2278  init_put_bits(&pb, dst, t);
2279 
2280  /* unescape bitstream */
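  /* (JPEG-LS marker avoidance: the encoder stuffs a 0 bit after each 0xFF
   * byte, so only 7 payload bits follow an 0xFF; the stuffed bit is removed
   * while repacking.) */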
2281  while (b < t) {
2282  uint8_t x = src[b++];
2283  put_bits(&pb, 8, x);
2284  if (x == 0xFF && b < t) {
2285  x = src[b++];
2286  if (x & 0x80) {
2287  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2288  x &= 0x7f;
2289  }
2290  put_bits(&pb, 7, x);
2291  bit_count--;
2292  }
2293  }
2294  flush_put_bits(&pb);
2295 
2296  *unescaped_buf_ptr = dst;
2297  *unescaped_buf_size = (bit_count + 7) >> 3;
 2298  memset(s->buffer + *unescaped_buf_size, 0,
 2299  AV_INPUT_BUFFER_PADDING_SIZE);
 2300  } else {
2301  *unescaped_buf_ptr = *buf_ptr;
2302  *unescaped_buf_size = buf_end - *buf_ptr;
2303  }
2304 
2305  return start_code;
2306 }
 2307 
 2308 static void reset_icc_profile(MJpegDecodeContext *s)
 2309 {
2310  int i;
2311 
2312  if (s->iccdata)
2313  for (i = 0; i < s->iccnum; i++)
2314  av_freep(&s->iccdata[i]);
2315  av_freep(&s->iccdata);
2316  av_freep(&s->iccdatalens);
2317 
2318  s->iccread = 0;
2319  s->iccnum = 0;
2320 }
2321 
2322 int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2323  AVPacket *avpkt)
2324 {
2325  AVFrame *frame = data;
2326  const uint8_t *buf = avpkt->data;
2327  int buf_size = avpkt->size;
2328  MJpegDecodeContext *s = avctx->priv_data;
2329  const uint8_t *buf_end, *buf_ptr;
2330  const uint8_t *unescaped_buf_ptr;
2331  int hshift, vshift;
2332  int unescaped_buf_size;
2333  int start_code;
2334  int i, index;
2335  int ret = 0;
2336  int is16bit;
2337 
2338  s->buf_size = buf_size;
 2339 
 2340  av_dict_free(&s->exif_metadata);
 2341  av_freep(&s->stereo3d);
2342  s->adobe_transform = -1;
2343 
2344  if (s->iccnum != 0)
2345  reset_icc_profile(s);
2346 
2347  buf_ptr = buf;
2348  buf_end = buf + buf_size;
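  /* Main marker loop: locate the next marker, unescape the segment that
   * follows it, point the bit reader at the unescaped data and dispatch on
   * the start code below. */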
2349  while (buf_ptr < buf_end) {
2350  /* find start next marker */
2351  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2352  &unescaped_buf_ptr,
2353  &unescaped_buf_size);
2354  /* EOF */
2355  if (start_code < 0) {
2356  break;
2357  } else if (unescaped_buf_size > INT_MAX / 8) {
2358  av_log(avctx, AV_LOG_ERROR,
2359  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2360  start_code, unescaped_buf_size, buf_size);
2361  return AVERROR_INVALIDDATA;
2362  }
2363  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2364  start_code, buf_end - buf_ptr);
2365 
2366  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2367 
2368  if (ret < 0) {
2369  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2370  goto fail;
2371  }
2372 
2373  s->start_code = start_code;
2374  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2375  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2376 
2377  /* process markers */
2378  if (start_code >= RST0 && start_code <= RST7) {
2379  av_log(avctx, AV_LOG_DEBUG,
2380  "restart marker: %d\n", start_code & 0x0f);
2381  /* APP fields */
2382  } else if (start_code >= APP0 && start_code <= APP15) {
2383  if ((ret = mjpeg_decode_app(s)) < 0)
2384  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2385  av_err2str(ret));
2386  /* Comment */
2387  } else if (start_code == COM) {
2388  ret = mjpeg_decode_com(s);
2389  if (ret < 0)
2390  return ret;
2391  } else if (start_code == DQT) {
2392  ret = ff_mjpeg_decode_dqt(s);
2393  if (ret < 0)
2394  return ret;
2395  }
2396 
2397  ret = -1;
2398 
2399  if (!CONFIG_JPEGLS_DECODER &&
2400  (start_code == SOF48 || start_code == LSE)) {
2401  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2402  return AVERROR(ENOSYS);
2403  }
2404 
2405  if (avctx->skip_frame == AVDISCARD_ALL) {
2406  switch(start_code) {
2407  case SOF0:
2408  case SOF1:
2409  case SOF2:
2410  case SOF3:
2411  case SOF48:
2412  case SOI:
2413  case SOS:
2414  case EOI:
2415  break;
2416  default:
2417  goto skip;
2418  }
2419  }
2420 
2421  switch (start_code) {
2422  case SOI:
2423  s->restart_interval = 0;
2424  s->restart_count = 0;
2425  s->raw_image_buffer = buf_ptr;
2426  s->raw_image_buffer_size = buf_end - buf_ptr;
2427  /* nothing to do on SOI */
2428  break;
2429  case DHT:
2430  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2431  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2432  goto fail;
2433  }
2434  break;
2435  case SOF0:
2436  case SOF1:
 2437  if (start_code == SOF0)
 2438  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
 2439  else
 2440  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
 2441  s->lossless = 0;
2442  s->ls = 0;
2443  s->progressive = 0;
2444  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2445  goto fail;
2446  break;
 2447  case SOF2:
 2448  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
 2449  s->lossless = 0;
2450  s->ls = 0;
2451  s->progressive = 1;
2452  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2453  goto fail;
2454  break;
 2455  case SOF3:
 2456  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
 2457  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
 2458  s->lossless = 1;
2459  s->ls = 0;
2460  s->progressive = 0;
2461  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2462  goto fail;
2463  break;
 2464  case SOF48:
 2465  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
 2466  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
 2467  s->lossless = 1;
2468  s->ls = 1;
2469  s->progressive = 0;
2470  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2471  goto fail;
2472  break;
2473  case LSE:
2474  if (!CONFIG_JPEGLS_DECODER ||
2475  (ret = ff_jpegls_decode_lse(s)) < 0)
2476  goto fail;
2477  break;
2478  case EOI:
2479 eoi_parser:
2480  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
 2481  s->progressive && s->cur_scan && s->got_picture)
 2482  mjpeg_idct_scan_progressive_ac(s);
 2483  s->cur_scan = 0;
2484  if (!s->got_picture) {
2485  av_log(avctx, AV_LOG_WARNING,
2486  "Found EOI before any SOF, ignoring\n");
2487  break;
2488  }
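  /* For interlaced streams each field arrives as its own JPEG image; the
   * frame is only emitted once the second field has been decoded. */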
2489  if (s->interlaced) {
2490  s->bottom_field ^= 1;
2491  /* if not bottom field, do not output image yet */
2492  if (s->bottom_field == !s->interlace_polarity)
2493  break;
2494  }
2495  if (avctx->skip_frame == AVDISCARD_ALL) {
2496  s->got_picture = 0;
2497  goto the_end_no_picture;
2498  }
2499  if (s->avctx->hwaccel) {
2500  ret = s->avctx->hwaccel->end_frame(s->avctx);
2501  if (ret < 0)
2502  return ret;
 2503 
 2504  av_freep(&s->hwaccel_picture_private);
 2505  }
2506  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2507  return ret;
2508  *got_frame = 1;
2509  s->got_picture = 0;
2510 
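  /* Build a constant QP table from the largest of the three quantizer scales
   * and attach it to the output frame. */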
2511  if (!s->lossless) {
2512  int qp = FFMAX3(s->qscale[0],
2513  s->qscale[1],
2514  s->qscale[2]);
2515  int qpw = (s->width + 15) / 16;
2516  AVBufferRef *qp_table_buf = av_buffer_alloc(qpw);
2517  if (qp_table_buf) {
2518  memset(qp_table_buf->data, qp, qpw);
2519  av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1);
2520  }
2521 
2522  if(avctx->debug & FF_DEBUG_QP)
2523  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2524  }
2525 
2526  goto the_end;
2527  case SOS:
2528  s->raw_scan_buffer = buf_ptr;
2529  s->raw_scan_buffer_size = buf_end - buf_ptr;
2530 
2531  s->cur_scan++;
2532  if (avctx->skip_frame == AVDISCARD_ALL) {
2533  skip_bits(&s->gb, get_bits_left(&s->gb));
2534  break;
2535  }
2536 
2537  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2538  (avctx->err_recognition & AV_EF_EXPLODE))
2539  goto fail;
2540  break;
2541  case DRI:
2542  if ((ret = mjpeg_decode_dri(s)) < 0)
2543  return ret;
2544  break;
2545  case SOF5:
2546  case SOF6:
2547  case SOF7:
2548  case SOF9:
2549  case SOF10:
2550  case SOF11:
2551  case SOF13:
2552  case SOF14:
2553  case SOF15:
2554  case JPG:
2555  av_log(avctx, AV_LOG_ERROR,
2556  "mjpeg: unsupported coding type (%x)\n", start_code);
2557  break;
2558  }
2559 
2560 skip:
2561  /* eof process start code */
2562  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2563  av_log(avctx, AV_LOG_DEBUG,
2564  "marker parser used %d bytes (%d bits)\n",
2565  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2566  }
2567  if (s->got_picture && s->cur_scan) {
2568  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2569  goto eoi_parser;
2570  }
2571  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2572  return AVERROR_INVALIDDATA;
2573 fail:
2574  s->got_picture = 0;
2575  return ret;
2576 the_end:
2577 
2578  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2579 
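  /* Some chroma layouts are decoded at reduced horizontal and/or vertical
   * resolution; interpolate those planes up to the geometry of the output
   * pixel format before the frame is returned. */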
2580  if (AV_RB32(s->upscale_h)) {
 2581  int p;
 2582  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
 2583  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2584  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2585  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2586  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2587  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2588  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2589  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2590  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2591  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2592  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2593  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2594  );
2595  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2596  if (ret)
2597  return ret;
 2598 
 2599  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
 2600  for (p = 0; p<s->nb_components; p++) {
2601  uint8_t *line = s->picture_ptr->data[p];
2602  int w = s->width;
2603  int h = s->height;
2604  if (!s->upscale_h[p])
2605  continue;
2606  if (p==1 || p==2) {
2607  w = AV_CEIL_RSHIFT(w, hshift);
2608  h = AV_CEIL_RSHIFT(h, vshift);
2609  }
2610  if (s->upscale_v[p] == 1)
2611  h = (h+1)>>1;
2612  av_assert0(w > 0);
2613  for (i = 0; i < h; i++) {
2614  if (s->upscale_h[p] == 1) {
2615  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2616  else line[w - 1] = line[(w - 1) / 2];
2617  for (index = w - 2; index > 0; index--) {
2618  if (is16bit)
2619  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2620  else
2621  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2622  }
2623  } else if (s->upscale_h[p] == 2) {
2624  if (is16bit) {
2625  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2626  if (w > 1)
2627  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2628  } else {
2629  line[w - 1] = line[(w - 1) / 3];
2630  if (w > 1)
2631  line[w - 2] = line[w - 1];
2632  }
2633  for (index = w - 3; index > 0; index--) {
2634  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2635  }
2636  }
2637  line += s->linesize[p];
2638  }
2639  }
2640  }
2641  if (AV_RB32(s->upscale_v)) {
 2642  int p;
 2643  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
 2644  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2645  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2646  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2647  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2648  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2649  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2651  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2654  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2655  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2656  );
2657  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2658  if (ret)
2659  return ret;
 2660 
 2661  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
 2662  for (p = 0; p < s->nb_components; p++) {
2663  uint8_t *dst;
2664  int w = s->width;
2665  int h = s->height;
2666  if (!s->upscale_v[p])
2667  continue;
2668  if (p==1 || p==2) {
2669  w = AV_CEIL_RSHIFT(w, hshift);
2670  h = AV_CEIL_RSHIFT(h, vshift);
2671  }
2672  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2673  for (i = h - 1; i; i--) {
2674  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2675  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2676  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2677  memcpy(dst, src1, w);
2678  } else {
2679  for (index = 0; index < w; index++)
2680  dst[index] = (src1[index] + src2[index]) >> 1;
2681  }
2682  dst -= s->linesize[p];
2683  }
2684  }
2685  }
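  /* Streams tagged by certain encoders (see the COM handling above) are
   * stored bottom-up; flip each plane vertically here. */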
2686  if (s->flipped && !s->rgb) {
2687  int j;
2688  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2689  if (ret)
2690  return ret;
 2691 
 2692  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
 2693  for (index=0; index<s->nb_components; index++) {
2694  uint8_t *dst = s->picture_ptr->data[index];
2695  int w = s->picture_ptr->width;
2696  int h = s->picture_ptr->height;
2697  if(index && index<3){
2698  w = AV_CEIL_RSHIFT(w, hshift);
2699  h = AV_CEIL_RSHIFT(h, vshift);
2700  }
2701  if(dst){
2702  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2703  for (i=0; i<h/2; i++) {
2704  for (j=0; j<w; j++)
2705  FFSWAP(int, dst[j], dst2[j]);
2706  dst += s->picture_ptr->linesize[index];
2707  dst2 -= s->picture_ptr->linesize[index];
2708  }
2709  }
2710  }
2711  }
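  /* Four-component Adobe streams: transform 0 carries CMYK-style data in the
   * GBRAP planes and transform 2 carries YCCK; in both cases the K (alpha)
   * plane is folded into the colour components and alpha is set opaque. */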
2712  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2713  int w = s->picture_ptr->width;
2714  int h = s->picture_ptr->height;
2715  av_assert0(s->nb_components == 4);
2716  for (i=0; i<h; i++) {
2717  int j;
2718  uint8_t *dst[4];
2719  for (index=0; index<4; index++) {
2720  dst[index] = s->picture_ptr->data[index]
2721  + s->picture_ptr->linesize[index]*i;
2722  }
2723  for (j=0; j<w; j++) {
2724  int k = dst[3][j];
2725  int r = dst[0][j] * k;
2726  int g = dst[1][j] * k;
2727  int b = dst[2][j] * k;
2728  dst[0][j] = g*257 >> 16;
2729  dst[1][j] = b*257 >> 16;
2730  dst[2][j] = r*257 >> 16;
2731  dst[3][j] = 255;
2732  }
2733  }
2734  }
2735  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2736  int w = s->picture_ptr->width;
2737  int h = s->picture_ptr->height;
2738  av_assert0(s->nb_components == 4);
2739  for (i=0; i<h; i++) {
2740  int j;
2741  uint8_t *dst[4];
2742  for (index=0; index<4; index++) {
2743  dst[index] = s->picture_ptr->data[index]
2744  + s->picture_ptr->linesize[index]*i;
2745  }
2746  for (j=0; j<w; j++) {
2747  int k = dst[3][j];
2748  int r = (255 - dst[0][j]) * k;
2749  int g = (128 - dst[1][j]) * k;
2750  int b = (128 - dst[2][j]) * k;
2751  dst[0][j] = r*257 >> 16;
2752  dst[1][j] = (g*257 >> 16) + 128;
2753  dst[2][j] = (b*257 >> 16) + 128;
2754  dst[3][j] = 255;
2755  }
2756  }
2757  }
2758 
2759  if (s->stereo3d) {
2760  AVStereo3D *stereo = av_stereo3d_create_side_data(data);
2761  if (stereo) {
2762  stereo->type = s->stereo3d->type;
2763  stereo->flags = s->stereo3d->flags;
2764  }
2765  av_freep(&s->stereo3d);
2766  }
2767 
2768  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2769  AVFrameSideData *sd;
2770  size_t offset = 0;
2771  int total_size = 0;
2772  int i;
2773 
2774  /* Sum size of all parts. */
2775  for (i = 0; i < s->iccnum; i++)
2776  total_size += s->iccdatalens[i];
2777 
2778  sd = av_frame_new_side_data(data, AV_FRAME_DATA_ICC_PROFILE, total_size);
2779  if (!sd) {
2780  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2781  return AVERROR(ENOMEM);
2782  }
2783 
2784  /* Reassemble the parts, which are now in-order. */
2785  for (i = 0; i < s->iccnum; i++) {
2786  memcpy(sd->data + offset, s->iccdata[i], s->iccdatalens[i]);
2787  offset += s->iccdatalens[i];
2788  }
2789  }
2790 
 2791  av_dict_copy(&((AVFrame *) data)->metadata, s->exif_metadata, 0);
 2792  av_dict_free(&s->exif_metadata);
 2793 
2794 the_end_no_picture:
2795  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2796  buf_end - buf_ptr);
2797 // return buf_end - buf_ptr;
2798  return buf_ptr - buf;
2799 }
 2800 
 2801 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
 2802 {
2803  MJpegDecodeContext *s = avctx->priv_data;
2804  int i, j;
2805 
2806  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2807  av_log(avctx, AV_LOG_INFO, "Single field\n");
2808  }
2809 
2810  if (s->picture) {
2811  av_frame_free(&s->picture);
2812  s->picture_ptr = NULL;
 2813  } else if (s->picture_ptr)
 2814  av_frame_unref(s->picture_ptr);
 2815 
2816  av_freep(&s->buffer);
2817  av_freep(&s->stereo3d);
2818  av_freep(&s->ljpeg_buffer);
2819  s->ljpeg_buffer_size = 0;
2820 
2821  for (i = 0; i < 3; i++) {
2822  for (j = 0; j < 4; j++)
2823  ff_free_vlc(&s->vlcs[i][j]);
2824  }
2825  for (i = 0; i < MAX_COMPONENTS; i++) {
2826  av_freep(&s->blocks[i]);
2827  av_freep(&s->last_nnz[i]);
 2828  }
 2829  av_dict_free(&s->exif_metadata);
 2830 
2831  reset_icc_profile(s);
 2832 
 2833  av_freep(&s->hwaccel_picture_private);
 2834 
2835  return 0;
2836 }
2837 
2838 static void decode_flush(AVCodecContext *avctx)
2839 {
2840  MJpegDecodeContext *s = avctx->priv_data;
2841  s->got_picture = 0;
2842 }
2843 
2844 #if CONFIG_MJPEG_DECODER
2845 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2846 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2847 static const AVOption options[] = {
2848  { "extern_huff", "Use external huffman table.",
2849  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2850  { NULL },
2851 };
2852 
2853 static const AVClass mjpegdec_class = {
2854  .class_name = "MJPEG decoder",
2855  .item_name = av_default_item_name,
2856  .option = options,
2857  .version = LIBAVUTIL_VERSION_INT,
2858 };
 2859 
 2860 AVCodec ff_mjpeg_decoder = {
 2861  .name = "mjpeg",
2862  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2863  .type = AVMEDIA_TYPE_VIDEO,
2864  .id = AV_CODEC_ID_MJPEG,
 2865  .priv_data_size = sizeof(MJpegDecodeContext),
 2866  .init = ff_mjpeg_decode_init,
 2867  .close = ff_mjpeg_decode_end,
 2868  .decode = ff_mjpeg_decode_frame,
 2869  .flush = decode_flush,
2870  .capabilities = AV_CODEC_CAP_DR1,
2871  .max_lowres = 3,
 2872  .priv_class = &mjpegdec_class,
 2873  .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
 2874  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
 2875  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
 2876  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2877 #if CONFIG_MJPEG_NVDEC_HWACCEL
2878  HWACCEL_NVDEC(mjpeg),
2879 #endif
2880 #if CONFIG_MJPEG_VAAPI_HWACCEL
2881  HWACCEL_VAAPI(mjpeg),
2882 #endif
2883  NULL
2884  },
2885 };
2886 #endif
 2887 #if CONFIG_THP_DECODER
 2888 AVCodec ff_thp_decoder = {
 2889  .name = "thp",
2890  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2891  .type = AVMEDIA_TYPE_VIDEO,
2892  .id = AV_CODEC_ID_THP,
 2893  .priv_data_size = sizeof(MJpegDecodeContext),
 2894  .init = ff_mjpeg_decode_init,
 2895  .close = ff_mjpeg_decode_end,
 2896  .decode = ff_mjpeg_decode_frame,
 2897  .flush = decode_flush,
2898  .capabilities = AV_CODEC_CAP_DR1,
2899  .max_lowres = 3,
2900  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2901 };
2902 #endif
int block_stride[MAX_COMPONENTS]
Definition: mjpegdec.h:86
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:54
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1370
const struct AVCodec * codec
Definition: avcodec.h:1580
const char const char void * val
Definition: avisynth_c.h:863
const AVPixFmtDescriptor * pix_desc
!< stereoscopic information (cached, since it is read before frame allocation)
Definition: mjpegdec.h:136
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
Definition: mjpeg.h:81
int v_count[MAX_COMPONENTS]
Definition: mjpegdec.h:89
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:263
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:122
AVOption.
Definition: opt.h:246
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
enum AVPixelFormat hwaccel_sw_pix_fmt
Definition: mjpegdec.h:152
Definition: mjpeg.h:71
#define HWACCEL_NVDEC(codec)
Definition: hwaccel.h:71
Definition: mjpeg.h:111
Definition: mjpeg.h:73
float re
Definition: fft.c:82
Definition: mjpeg.h:40
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
misc image utilities
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2562
Definition: mjpeg.h:42
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
const char * g
Definition: vf_curves.c:115
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:377
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:503
size_t raw_image_buffer_size
Definition: mjpegdec.h:145
void(* clear_block)(int16_t *block)
Definition: blockdsp.h:36
#define avpriv_request_sample(...)
int h_scount[MAX_COMPONENTS]
Definition: mjpegdec.h:94
BlockDSPContext bdsp
Definition: mjpegdec.h:111
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:200
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2123
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2206
TIFF constants & data structures.
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int num
Numerator.
Definition: rational.h:59
int qscale[4]
quantizer scale calculated from quant_matrixes
Definition: mjpegdec.h:58
int size
Definition: avcodec.h:1484
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
uint8_t * buffer
Definition: mjpegdec.h:54
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1950
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1781
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
#define copy_data_segment(skip)
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
GLint GLenum type
Definition: opengl_enc.c:104
Definition: mjpeg.h:68
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
int dc_index[MAX_COMPONENTS]
Definition: mjpegdec.h:91
Definition: mjpeg.h:75
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
Definition: mjpeg.h:53
int linesize[MAX_COMPONENTS]
linesize << interlaced
Definition: mjpegdec.h:103
discard all
Definition: avcodec.h:817
uint8_t permutated[64]
Definition: idctdsp.h:33
Views are next to each other.
Definition: stereo3d.h:67
uint8_t upscale_v[4]
Definition: mjpegdec.h:70
uint8_t run
Definition: svq3.c:206
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2802
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:791
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2735
#define src
Definition: vp8dsp.c:254
int profile
profile
Definition: avcodec.h:2904
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
AVCodec.
Definition: avcodec.h:3495
EXIF metadata parser.
JPEG-LS decoder.
MJPEG encoder and decoder.
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:3003
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
int comp_index[MAX_COMPONENTS]
Definition: mjpegdec.h:90
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2308
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1581
HpelDSPContext hdsp
Definition: mjpegdec.h:112
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1694
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:3001
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3046
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
const uint8_t * raw_image_buffer
Definition: mjpegdec.h:144
int16_t block[64]
Definition: mjpegdec.h:105
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
Definition: mjpeg.h:72
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1794
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVOptions.
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:176
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2657
uint16_t(* ljpeg_buffer)[4]
Definition: mjpegdec.h:128
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
Definition: mjpeg.h:46
unsigned int ljpeg_buffer_size
Definition: mjpegdec.h:129
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:3234
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:3005
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1672
Definition: mjpeg.h:54
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
uint8_t * last_nnz[MAX_COMPONENTS]
Definition: mjpegdec.h:107
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
AVFrame * picture_ptr
Definition: mjpegdec.h:101
Structure to hold side data for an AVFrame.
Definition: frame.h:201
#define height
uint8_t * data
Definition: avcodec.h:1483
int quant_sindex[MAX_COMPONENTS]
Definition: mjpegdec.h:96
#define MAX_COMPONENTS
Definition: mjpegdec.h:44
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
Definition: pixfmt.h:100
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:3004
int h_count[MAX_COMPONENTS]
Definition: mjpegdec.h:88
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define ff_dlog(a,...)
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:378
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:400
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2810
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
ptrdiff_t size
Definition: opengl_enc.c:100
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:429
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2213
#define av_log(a,...)
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
static int aligned(int val)
Definition: dashdec.c:178
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2322
enum AVCodecID id
Definition: avcodec.h:3509
AVDictionary * exif_metadata
Definition: mjpegdec.h:132
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:858
uint8_t ** iccdata
Definition: mjpegdec.h:138
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1022
static const uint16_t mask[17]
Definition: lzw.c:38
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:957
#define PTRDIFF_SPECIFIER
Definition: internal.h:261
int nb_blocks[MAX_COMPONENTS]
Definition: mjpegdec.h:93
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:523
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2801
VLC vlcs[3][4]
Definition: mjpegdec.h:57
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:119
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2550
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:431
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your local see the OFFSET() macro
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1651
Definition: graph2dot.c:48
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3502
uint8_t bits
Definition: vp3data.h:202
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1408
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2163
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:122
Definition: mjpeg.h:39
Definition: mjpeg.h:70
Definition: vlc.h:26
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
Definition: mjpegdec.c:53
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
JPEG-LS.
Definition: mjpeg.h:103
Definition: mjpeg.h:79
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
ScanTable scantable
Definition: mjpegdec.h:110
Definition: mjpeg.h:80
#define b
Definition: input.c:41
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1377
Definition: mjpeg.h:56
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:305
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:406
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2700
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:371
#define FFMIN(a, b)
Definition: common.h:96
Definition: mjpeg.h:44
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
uint8_t interlaced
Definition: mxfenc.c:2152
#define width
int component_id[MAX_COMPONENTS]
Definition: mjpegdec.h:87
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1806
#define NEG_USR32(a, s)
Definition: mathops.h:166
uint8_t w
Definition: llviddspenc.c:38
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
uint8_t raw_huffman_lengths[2][4][16]
Definition: mjpegdec.h:149
Definition: mjpeg.h:41
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:3002
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
int quant_index[4]
Definition: mjpegdec.h:98
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
int v_scount[MAX_COMPONENTS]
Definition: mjpegdec.h:95
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2711
int n
Definition: avisynth_c.h:760
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
GetBitContext gb
Definition: mjpegdec.h:49
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:72
HW acceleration through CUDA.
Definition: pixfmt.h:235
#define ZERO_RUN
Definition: mjpegdec.c:939
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
#define FF_ARRAY_ELEMS(a)
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:523
int bits
Definition: vlc.h:27
if(ret)
static const float pred[4]
Definition: siprdata.h:259
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:398
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
AVCodec ff_mjpeg_decoder
IDCTDSPContext idsp
Definition: mjpegdec.h:113
#define src1
Definition: h264pred.c:139
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
#define av_bswap32
Definition: bswap.h:33
Libavcodec external API header.
Views are on top of each other.
Definition: stereo3d.h:79
Definition: mjpeg.h:52
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:87
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3764
enum AVCodecID codec_id
Definition: avcodec.h:1581
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
int debug
debug
Definition: avcodec.h:2656
AVStereo3D * stereo3d
Definition: mjpegdec.h:134
main external API structure.
Definition: avcodec.h:1571
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new 
state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
uint8_t * data
The data buffer.
Definition: buffer.h:89
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:1596
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1968
uint8_t * data
Definition: frame.h:203
Snow codec notes: half-pel interpolation filter coefficients, high-level bitstream structure, range coder state transitions, and motion vector prediction (see snow.txt for the full description).
Definition: snow.txt:206
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:766
int extradata_size
Definition: avcodec.h:1673
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:130
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:348
int coded_height
Definition: avcodec.h:1759
Describe the class of an AVClass context structure.
Definition: log.h:67
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
static const AVProfile profiles[]
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
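A hedged sketch of attaching side data to a frame with av_frame_new_side_data(); the helper name and the opaque payload are illustrative only, not something mjpegdec.c itself exports.

#include <string.h>

#include "libavutil/error.h"
#include "libavutil/frame.h"

/* Sketch only: copy an opaque payload into newly allocated frame side data. */
static int attach_payload(AVFrame *frame, enum AVFrameSideDataType type,
                          const uint8_t *payload, int size)
{
    AVFrameSideData *sd = av_frame_new_side_data(frame, type, size);
    if (!sd)
        return AVERROR(ENOMEM);
    memcpy(sd->data, payload, size);
    return 0;
}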
int index
Definition: gxfenc.c:89
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:773
int ac_index[MAX_COMPONENTS]
Definition: mjpegdec.h:92
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2199
Rational number (pair of numerator and denominator).
Definition: rational.h:58
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1057
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
Definition: mjpeg.h:45
uint64_t coefs_finished[MAX_COMPONENTS]
bitmask of which coefs have been completely decoded (progressive mode)
Definition: mjpegdec.h:108
Definition: mjpeg.h:48
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
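The GetBitContext entries above (init_get_bits(), get_bits1(), skip_bits(), get_bits_long()) form the decoder's basic bit-reading pattern. A minimal sketch with made-up field widths:

#include "get_bits.h"

/* Sketch only: initialize a bit reader over buf and read a few fields.
 * Note that init_get_bits() takes the size in *bits*. */
static int read_example_fields(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int marker, flag;
    unsigned wide;
    int ret = init_get_bits(&gb, buf, buf_size * 8);
    if (ret < 0)
        return ret;

    marker = get_bits(&gb, 8);       /* fixed 8-bit field */
    flag   = get_bits1(&gb);         /* single-bit flag */
    skip_bits(&gb, 3);               /* discard 3 reserved bits */
    wide   = get_bits_long(&gb, 24); /* get_bits_long() handles 0-32 bit reads */

    return marker + flag + (int)wide; /* dummy use of the values */
}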
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
enum AVPixelFormat hwaccel_pix_fmt
Definition: mjpegdec.h:153
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
uint8_t raw_huffman_values[2][4][256]
Definition: mjpegdec.h:150
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1529
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
#define MIN_CACHE_BITS
Definition: get_bits.h:128
Definition: mjpeg.h:47
#define HWACCEL_VAAPI(codec)
Definition: hwaccel.h:73
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
JPEG-LS extension parameters.
Definition: mjpeg.h:104
#define flags(name, subs,...)
Definition: cbs_av1.c:561
size_t raw_scan_buffer_size
Definition: mjpegdec.h:147
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3753
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
uint8_t level
Definition: svq3.c:207
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1616
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:522
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3725
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:139
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:840
Definition: mjpeg.h:94
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:164
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1223
A reference to a data buffer.
Definition: buffer.h:81
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
const OptionDef options[]
Definition: ffmpeg_opt.c:3374
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
#define FF_DEBUG_QP
Definition: avcodec.h:2661
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:3233
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
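init_put_bits() and flush_put_bits() bracket the PutBitContext writer API referenced above. A hedged sketch with arbitrary values, not the data mjpegdec.c actually writes:

#include "put_bits.h"

/* Sketch only: write a few bit fields into out[] and pad the last byte. */
static int write_example_fields(uint8_t *out, int out_size)
{
    PutBitContext pb;

    init_put_bits(&pb, out, out_size);
    put_bits(&pb, 16, 0xFFD8);       /* e.g. a JPEG SOI marker */
    put_bits(&pb,  4, 0x7);          /* arbitrary 4-bit field */
    flush_put_bits(&pb);             /* zero-pad to a byte boundary */

    return put_bits_count(&pb) >> 3; /* number of bytes produced */
}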
int den
Denominator.
Definition: rational.h:60
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:796
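A hedged sketch of the padding convention: callers that hand bitstreams to the decoder are expected to over-allocate by AV_INPUT_BUFFER_PADDING_SIZE and zero the tail so the bit reader can over-read safely. The helper name below is made up.

#include <string.h>

#include "libavcodec/avcodec.h"
#include "libavutil/mem.h"

/* Sketch only: duplicate src into a buffer with zeroed decoder padding. */
static uint8_t *dup_with_padding(const uint8_t *src, int size)
{
    uint8_t *buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return NULL;
    memcpy(buf, src, size);
    memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    return buf;
}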
static int lowres
Definition: ffplay.c:335
const uint8_t * raw_scan_buffer
Definition: mjpegdec.h:146
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
AVCodecContext * avctx
Definition: mjpegdec.h:48
void * priv_data
Definition: avcodec.h:1598
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
#define av_free(p)
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2670
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1393
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int top_field_first
If the content is interlaced, whether the top field is displayed first.
Definition: frame.h:447
int got_picture
set once a SOF has been found and the picture is valid.
Definition: mjpegdec.h:102
int len
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3773
int16_t(*[MAX_COMPONENTS] blocks)[64]
intermediate sums (progressive mode)
Definition: mjpegdec.h:106
AVFrame * picture
Definition: mjpegdec.h:100
void * hwaccel_picture_private
Definition: mjpegdec.h:154
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
The official guide to swscale for confused developers.
Definition: swscale.txt:2
Definition: mjpeg.h:50
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:373
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
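bytestream2_seek() repositions a GetByteContext, clamping the offset to the buffer bounds. A minimal sketch using bytestream2_init() and bytestream2_get_byte() from the same byte-reader API; the offset handling is illustrative:

#include <stdio.h>   /* SEEK_SET */

#include "bytestream.h"

/* Sketch only: jump to an absolute offset and read one byte. */
static int byte_at_offset(const uint8_t *buf, int size, int offset)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, size);
    if (bytestream2_seek(&gb, offset, SEEK_SET) < 0)
        return AVERROR_INVALIDDATA;   /* only fails for an invalid whence */
    return bytestream2_get_byte(&gb); /* returns 0 if the buffer is exhausted */
}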
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:258
int last_dc[MAX_COMPONENTS]
Definition: mjpegdec.h:99
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:76
#define REFINE_BIT(j)
Definition: mjpegdec.c:931
uint8_t upscale_h[4]
Definition: mjpegdec.h:69
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2838
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2262
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:261
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2228
#define av_always_inline
Definition: attributes.h:39
static const uint8_t start_code[]
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:545
Definition: mjpeg.h:82
#define VD
Definition: cuviddec.c:1118
#define FFSWAP(type, a, b)
Definition: common.h:99
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2188
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:81
MJPEG decoder.
#define MKTAG(a, b, c, d)
Definition: common.h:366
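MKTAG() packs four characters into a 32-bit little-endian tag, so it pairs naturally with AV_RL32() (listed below) for FourCC comparisons. A hedged sketch; the 'AVI1' tag in the usage note is only an example:

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

/* Sketch only: compare a 4-byte identifier in a buffer against a FourCC. */
static int tag_matches(const uint8_t *p, char a, char b, char c, char d)
{
    return AV_RL32(p) == (uint32_t)MKTAG(a, b, c, d);
}

/* e.g. tag_matches(segment, 'A', 'V', 'I', '1') */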
AVCodec ff_thp_decoder
Definition: mjpeg.h:61
enum AVCodecID id
AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1460
uint16_t quant_matrixes[4][64]
Definition: mjpegdec.h:56
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1182
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:987
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:399
for (j = 16; j > 0; --j)
#define FFMAX3(a, b, c)
Definition: common.h:95
GLuint buffer
Definition: opengl_enc.c:101
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
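AV_CEIL_RSHIFT() is a right shift that rounds up, typically used to derive subsampled chroma plane dimensions from the luma size. A short hedged sketch; the 4:2:0 shift values in the comments are only an example:

#include "libavutil/common.h"

/* Sketch only: chroma plane size for a given chroma subsampling shift. */
static void chroma_plane_size(int width, int height,
                              int log2_chroma_w, int log2_chroma_h,
                              int *cw, int *ch)
{
    *cw = AV_CEIL_RSHIFT(width,  log2_chroma_w); /* e.g. 1 for 4:2:0 */
    *ch = AV_CEIL_RSHIFT(height, log2_chroma_h); /* e.g. 1 for 4:2:0 */
}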
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
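av_mallocz_array() performs an overflow-checked, zero-initialized array allocation. The sketch below mirrors the kind of per-component block array declared above; the helper and its error handling are assumptions:

#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Sketch only: allocate nb_blocks zeroed 64-coefficient blocks. */
static int alloc_blocks(int16_t (**blocks)[64], size_t nb_blocks)
{
    *blocks = av_mallocz_array(nb_blocks, sizeof(**blocks));
    if (!*blocks)
        return AVERROR(ENOMEM);
    return 0;                 /* release later with av_freep(blocks) */
}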
Definition: mjpeg.h:49
bitstream writer API