FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "decode.h"
40 #include "hwconfig.h"
41 #include "idctdsp.h"
42 #include "internal.h"
43 #include "jpegtables.h"
44 #include "mjpeg.h"
45 #include "mjpegdec.h"
46 #include "jpeglsdec.h"
47 #include "profiles.h"
48 #include "put_bits.h"
49 #include "tiff.h"
50 #include "exif.h"
51 #include "bytestream.h"
52 
53 
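/* Editorial note: builds the standard JPEG Huffman tables (the ones listed in
 * ITU-T T.81, Annex K) so streams that omit DHT segments can still be decoded.
 * The raw bits/values copies kept below are presumably reused later, e.g. when
 * a hardware decoder needs the table data verbatim. */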
54 static int init_default_huffman_tables(MJpegDecodeContext *s)
55 {
56  static const struct {
57  int class;
58  int index;
59  const uint8_t *bits;
60  const uint8_t *values;
61  int length;
62  } ht[] = {
63      { 0, 0, avpriv_mjpeg_bits_dc_luminance,
64          avpriv_mjpeg_val_dc, 12 },
65      { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
66          avpriv_mjpeg_val_dc, 12 },
67      { 1, 0, avpriv_mjpeg_bits_ac_luminance,
68          avpriv_mjpeg_val_ac_luminance, 162 },
69      { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
70          avpriv_mjpeg_val_ac_chrominance, 162 },
71      { 2, 0, avpriv_mjpeg_bits_ac_luminance,
72          avpriv_mjpeg_val_ac_luminance, 162 },
73      { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
74          avpriv_mjpeg_val_ac_chrominance, 162 },
75  };
76  int i, ret;
77 
78  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
79  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
80  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
81  ht[i].bits, ht[i].values,
82  ht[i].class == 1, s->avctx);
83  if (ret < 0)
84  return ret;
85 
86  if (ht[i].class < 2) {
87  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
88  ht[i].bits + 1, 16);
89  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
90  ht[i].values, ht[i].length);
91  }
92  }
93 
94  return 0;
95 }
96 
97 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
98 {
99  s->buggy_avid = 1;
100  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
101  s->interlace_polarity = 1;
102  if (len > 14 && buf[12] == 2) /* 2 - PAL */
103  s->interlace_polarity = 0;
104  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
105  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
106 }
107 
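/* Editorial note: (re)initialises the IDCT and the zigzag scan permutation;
 * it is called again from ff_mjpeg_decode_sof whenever the stream's bit depth
 * changes. */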
108 static void init_idct(AVCodecContext *avctx)
109 {
110  MJpegDecodeContext *s = avctx->priv_data;
111 
112  ff_idctdsp_init(&s->idsp, avctx);
113     ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
114                       ff_zigzag_direct);
115 }
116 
117 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
118 {
119  MJpegDecodeContext *s = avctx->priv_data;
120  int ret;
121 
122  if (!s->picture_ptr) {
123  s->picture = av_frame_alloc();
124  if (!s->picture)
125  return AVERROR(ENOMEM);
126  s->picture_ptr = s->picture;
127  }
128 
129  s->pkt = av_packet_alloc();
130  if (!s->pkt)
131  return AVERROR(ENOMEM);
132 
133  s->avctx = avctx;
134  ff_blockdsp_init(&s->bdsp, avctx);
135  ff_hpeldsp_init(&s->hdsp, avctx->flags);
136  init_idct(avctx);
137  s->buffer_size = 0;
138  s->buffer = NULL;
139  s->start_code = -1;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
143     avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
144     avctx->colorspace = AVCOL_SPC_BT470BG;
145     s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150  if (s->extern_huff) {
151  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
152  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
153  return ret;
154  if (ff_mjpeg_decode_dht(s)) {
155  av_log(avctx, AV_LOG_ERROR,
156  "error using external huffman table, switching back to internal\n");
157  if ((ret = init_default_huffman_tables(s)) < 0)
158  return ret;
159  }
160  }
161  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
162  s->interlace_polarity = 1; /* bottom field first */
163  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
164  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
165  if (avctx->codec_tag == AV_RL32("MJPG"))
166  s->interlace_polarity = 1;
167  }
168 
169  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
170  if (avctx->extradata_size >= 4)
171             s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
172 
173  if (s->smv_frames_per_jpeg <= 0) {
174  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
175  return AVERROR_INVALIDDATA;
176  }
177 
178  s->smv_frame = av_frame_alloc();
179  if (!s->smv_frame)
180  return AVERROR(ENOMEM);
181  } else if (avctx->extradata_size > 8
182  && AV_RL32(avctx->extradata) == 0x2C
183  && AV_RL32(avctx->extradata+4) == 0x18) {
184  parse_avid(s, avctx->extradata, avctx->extradata_size);
185  }
186 
187  if (avctx->codec->id == AV_CODEC_ID_AMV)
188  s->flipped = 1;
189 
190  return 0;
191 }
192 
193 
194 /* quantize tables */
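/* Editorial note: each DQT table consists of a 4-bit precision flag (0 = 8-bit,
 * 1 = 16-bit entries), a 4-bit table id and 64 quantiser values; qscale[] keeps
 * a rough per-table scale estimate derived from the first AC entries. */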
195 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
196 {
197  int len, index, i;
198 
199  len = get_bits(&s->gb, 16) - 2;
200 
201  if (8*len > get_bits_left(&s->gb)) {
202  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
203  return AVERROR_INVALIDDATA;
204  }
205 
206  while (len >= 65) {
207  int pr = get_bits(&s->gb, 4);
208  if (pr > 1) {
209  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
210  return AVERROR_INVALIDDATA;
211  }
212  index = get_bits(&s->gb, 4);
213  if (index >= 4)
214  return -1;
215  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
216  /* read quant table */
217  for (i = 0; i < 64; i++) {
218  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
219  if (s->quant_matrixes[index][i] == 0) {
220  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
221  return AVERROR_INVALIDDATA;
222  }
223  }
224 
225  // XXX FIXME fine-tune, and perhaps add dc too
226  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
227  s->quant_matrixes[index][8]) >> 1;
228  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
229  index, s->qscale[index]);
230  len -= 1 + 64 * (1+pr);
231  }
232  return 0;
233 }
234 
235 /* decode huffman tables and build VLC decoders */
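/* Editorial note: a DHT entry carries a 4-bit class (0 = DC, 1 = AC), a 4-bit
 * table id, 16 code-length counts and then the symbol values. For AC tables a
 * second VLC copy is stored in s->vlcs[2], which the progressive decoder
 * indexes separately. */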
236 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
237 {
238  int len, index, i, class, n, v;
239  uint8_t bits_table[17];
240  uint8_t val_table[256];
241  int ret = 0;
242 
243  len = get_bits(&s->gb, 16) - 2;
244 
245  if (8*len > get_bits_left(&s->gb)) {
246  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
247  return AVERROR_INVALIDDATA;
248  }
249 
250  while (len > 0) {
251  if (len < 17)
252  return AVERROR_INVALIDDATA;
253  class = get_bits(&s->gb, 4);
254  if (class >= 2)
255  return AVERROR_INVALIDDATA;
256  index = get_bits(&s->gb, 4);
257  if (index >= 4)
258  return AVERROR_INVALIDDATA;
259  n = 0;
260  for (i = 1; i <= 16; i++) {
261  bits_table[i] = get_bits(&s->gb, 8);
262  n += bits_table[i];
263  }
264  len -= 17;
265  if (len < n || n > 256)
266  return AVERROR_INVALIDDATA;
267 
268  for (i = 0; i < n; i++) {
269  v = get_bits(&s->gb, 8);
270  val_table[i] = v;
271  }
272  len -= n;
273 
274  /* build VLC and flush previous vlc if present */
275  ff_free_vlc(&s->vlcs[class][index]);
276  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
277  class, index, n);
278  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
279  val_table, class > 0, s->avctx)) < 0)
280  return ret;
281 
282  if (class > 0) {
283  ff_free_vlc(&s->vlcs[2][index]);
284  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
285  val_table, 0, s->avctx)) < 0)
286  return ret;
287  }
288 
289  for (i = 0; i < 16; i++)
290  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
291  for (i = 0; i < 256; i++)
292  s->raw_huffman_values[class][index][i] = val_table[i];
293  }
294  return 0;
295 }
296 
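/* Editorial note: SOF parsing reads the sample precision, image dimensions and
 * the per-component sampling factors / quantiser ids, maps that combination
 * onto an AVPixelFormat, and (re)allocates the output frame and, for
 * progressive files, the coefficient buffers. */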
297 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
298 {
299  int len, nb_components, i, width, height, bits, ret, size_change;
300  unsigned pix_fmt_id;
301  int h_count[MAX_COMPONENTS] = { 0 };
302  int v_count[MAX_COMPONENTS] = { 0 };
303 
304  s->cur_scan = 0;
305  memset(s->upscale_h, 0, sizeof(s->upscale_h));
306  memset(s->upscale_v, 0, sizeof(s->upscale_v));
307 
308  len = get_bits(&s->gb, 16);
309  bits = get_bits(&s->gb, 8);
310 
311  if (bits > 16 || bits < 1) {
312  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
313  return AVERROR_INVALIDDATA;
314  }
315 
316  if (s->avctx->bits_per_raw_sample != bits) {
317  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
318         s->avctx->bits_per_raw_sample = bits;
319         init_idct(s->avctx);
320  }
321  if (s->pegasus_rct)
322  bits = 9;
323  if (bits == 9 && !s->pegasus_rct)
324  s->rct = 1; // FIXME ugly
325 
326  if(s->lossless && s->avctx->lowres){
327  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
328  return -1;
329  }
330 
331  height = get_bits(&s->gb, 16);
332  width = get_bits(&s->gb, 16);
333 
334  // HACK for odd_height.mov
335  if (s->interlaced && s->width == width && s->height == height + 1)
336  height= s->height;
337 
338  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
339  if (av_image_check_size(width, height, 0, s->avctx) < 0)
340  return AVERROR_INVALIDDATA;
341  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
342  return AVERROR_INVALIDDATA;
343 
344  nb_components = get_bits(&s->gb, 8);
345  if (nb_components <= 0 ||
346  nb_components > MAX_COMPONENTS)
347  return -1;
348  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
349  if (nb_components != s->nb_components) {
350             av_log(s->avctx, AV_LOG_ERROR,
351                    "nb_components changing in interlaced picture\n");
352  return AVERROR_INVALIDDATA;
353  }
354  }
355  if (s->ls && !(bits <= 8 || nb_components == 1)) {
356         avpriv_report_missing_feature(s->avctx,
357                                       "JPEG-LS that is not <= 8 "
358  "bits/component or 16-bit gray");
359  return AVERROR_PATCHWELCOME;
360  }
361  if (len != 8 + 3 * nb_components) {
362  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
363  return AVERROR_INVALIDDATA;
364  }
365 
366  s->nb_components = nb_components;
367  s->h_max = 1;
368  s->v_max = 1;
369  for (i = 0; i < nb_components; i++) {
370  /* component id */
371  s->component_id[i] = get_bits(&s->gb, 8) - 1;
372  h_count[i] = get_bits(&s->gb, 4);
373  v_count[i] = get_bits(&s->gb, 4);
374  /* compute hmax and vmax (only used in interleaved case) */
375  if (h_count[i] > s->h_max)
376  s->h_max = h_count[i];
377  if (v_count[i] > s->v_max)
378  s->v_max = v_count[i];
379  s->quant_index[i] = get_bits(&s->gb, 8);
380  if (s->quant_index[i] >= 4) {
381  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
382  return AVERROR_INVALIDDATA;
383  }
384  if (!h_count[i] || !v_count[i]) {
385             av_log(s->avctx, AV_LOG_ERROR,
386                    "Invalid sampling factor in component %d %d:%d\n",
387  i, h_count[i], v_count[i]);
388  return AVERROR_INVALIDDATA;
389  }
390 
391  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
392  i, h_count[i], v_count[i],
393  s->component_id[i], s->quant_index[i]);
394  }
395  if ( nb_components == 4
396  && s->component_id[0] == 'C' - 1
397  && s->component_id[1] == 'M' - 1
398  && s->component_id[2] == 'Y' - 1
399  && s->component_id[3] == 'K' - 1)
400  s->adobe_transform = 0;
401 
402  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
403  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
404  return AVERROR_PATCHWELCOME;
405  }
406 
407  if (s->bayer) {
408  if (nb_components == 2) {
409  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
410  width stored in their SOF3 markers is the width of each one. We only output
411  a single component, therefore we need to adjust the output image width. We
412  handle the deinterleaving (but not the debayering) in this file. */
413  width *= 2;
414  }
415  /* They can also contain 1 component, which is double the width and half the height
416  of the final image (rows are interleaved). We don't handle the decoding in this
417  file, but leave that to the TIFF/DNG decoder. */
418  }
419 
420  /* if different size, realloc/alloc picture */
421  if (width != s->width || height != s->height || bits != s->bits ||
422  memcmp(s->h_count, h_count, sizeof(h_count)) ||
423  memcmp(s->v_count, v_count, sizeof(v_count))) {
424  size_change = 1;
425 
426  s->width = width;
427  s->height = height;
428  s->bits = bits;
429  memcpy(s->h_count, h_count, sizeof(h_count));
430  memcpy(s->v_count, v_count, sizeof(v_count));
431  s->interlaced = 0;
432  s->got_picture = 0;
433 
434  /* test interlaced mode */
435  if (s->first_picture &&
436  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
437  s->orig_height != 0 &&
438  s->height < ((s->orig_height * 3) / 4)) {
439  s->interlaced = 1;
440             s->bottom_field = s->interlace_polarity;
441             s->picture_ptr->interlaced_frame = 1;
442             s->picture_ptr->top_field_first  = !s->interlace_polarity;
443             height *= 2;
444  }
445 
446  ret = ff_set_dimensions(s->avctx, width, height);
447  if (ret < 0)
448  return ret;
449 
450  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
451  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
452  s->orig_height < height)
453             s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
454 
455  s->first_picture = 0;
456  } else {
457  size_change = 0;
458  }
459 
460  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
461         s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
462         if (s->avctx->height <= 0)
463  return AVERROR_INVALIDDATA;
464  }
465 
466  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
467  if (s->progressive) {
468  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
469  return AVERROR_INVALIDDATA;
470  }
471  } else {
472  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
473  s->rgb = 1;
474  else if (!s->lossless)
475  s->rgb = 0;
476  /* XXX: not complete test ! */
477  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
478  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
479  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
480  (s->h_count[3] << 4) | s->v_count[3];
481  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
482  /* NOTE we do not allocate pictures large enough for the possible
483  * padding of h/v_count being 4 */
484  if (!(pix_fmt_id & 0xD0D0D0D0))
485  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
486  if (!(pix_fmt_id & 0x0D0D0D0D))
487  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
488 
489  for (i = 0; i < 8; i++) {
490  int j = 6 + (i&1) - (i&6);
491  int is = (pix_fmt_id >> (4*i)) & 0xF;
492  int js = (pix_fmt_id >> (4*j)) & 0xF;
493 
494  if (is == 1 && js != 2 && (i < 2 || i > 5))
495  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
496  if (is == 1 && js != 2 && (i < 2 || i > 5))
497  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
498 
499  if (is == 1 && js == 2) {
500  if (i & 1) s->upscale_h[j/2] = 1;
501  else s->upscale_v[j/2] = 1;
502  }
503  }
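    /* Editorial note: pix_fmt_id packs the horizontal/vertical sampling factors
     * of up to four components into one 32-bit value; the loop above flags
     * planes that are subsampled relative to their counterpart so they can be
     * upscaled after decoding (upscale_h/upscale_v). */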
504 
505  if (s->bayer) {
506  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
507  goto unk_pixfmt;
508  }
509 
510  switch (pix_fmt_id) {
511  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
512  if (!s->bayer)
513  goto unk_pixfmt;
514         s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
515         break;
516  case 0x11111100:
517  if (s->rgb)
519  else {
520  if ( s->adobe_transform == 0
521  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
523  } else {
527  }
528  }
529  av_assert0(s->nb_components == 3);
530  break;
531  case 0x11111111:
532  if (s->rgb)
534  else {
535  if (s->adobe_transform == 0 && s->bits <= 8) {
537  } else {
540  }
541  }
542  av_assert0(s->nb_components == 4);
543  break;
544  case 0x22111122:
545  case 0x22111111:
546  if (s->adobe_transform == 0 && s->bits <= 8) {
548  s->upscale_v[1] = s->upscale_v[2] = 1;
549  s->upscale_h[1] = s->upscale_h[2] = 1;
550  } else if (s->adobe_transform == 2 && s->bits <= 8) {
552  s->upscale_v[1] = s->upscale_v[2] = 1;
553  s->upscale_h[1] = s->upscale_h[2] = 1;
555  } else {
556  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
559  }
560  av_assert0(s->nb_components == 4);
561  break;
562  case 0x12121100:
563  case 0x22122100:
564  case 0x21211100:
565  case 0x21112100:
566  case 0x22211200:
567  case 0x22221100:
568  case 0x22112200:
569  case 0x11222200:
571  else
572  goto unk_pixfmt;
574  break;
575  case 0x11000000:
576  case 0x13000000:
577  case 0x14000000:
578  case 0x31000000:
579  case 0x33000000:
580  case 0x34000000:
581  case 0x41000000:
582  case 0x43000000:
583  case 0x44000000:
584  if(s->bits <= 8)
586  else
588  break;
589  case 0x12111100:
590  case 0x14121200:
591  case 0x14111100:
592  case 0x22211100:
593  case 0x22112100:
594  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
595  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
596  else
597  goto unk_pixfmt;
598  s->upscale_v[0] = s->upscale_v[1] = 1;
599  } else {
600  if (pix_fmt_id == 0x14111100)
601  s->upscale_v[1] = s->upscale_v[2] = 1;
603  else
604  goto unk_pixfmt;
606  }
607  break;
608  case 0x21111100:
609  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
610  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
611  else
612  goto unk_pixfmt;
613  s->upscale_h[0] = s->upscale_h[1] = 1;
614  } else {
618  }
619  break;
620  case 0x31111100:
621  if (s->bits > 8)
622  goto unk_pixfmt;
625  s->upscale_h[1] = s->upscale_h[2] = 2;
626  break;
627  case 0x22121100:
628  case 0x22111200:
630  else
631  goto unk_pixfmt;
633  break;
634  case 0x22111100:
635  case 0x23111100:
636  case 0x42111100:
637  case 0x24111100:
641  if (pix_fmt_id == 0x42111100) {
642  if (s->bits > 8)
643  goto unk_pixfmt;
644  s->upscale_h[1] = s->upscale_h[2] = 1;
645  } else if (pix_fmt_id == 0x24111100) {
646  if (s->bits > 8)
647  goto unk_pixfmt;
648  s->upscale_v[1] = s->upscale_v[2] = 1;
649  } else if (pix_fmt_id == 0x23111100) {
650  if (s->bits > 8)
651  goto unk_pixfmt;
652  s->upscale_v[1] = s->upscale_v[2] = 2;
653  }
654  break;
655  case 0x41111100:
657  else
658  goto unk_pixfmt;
660  break;
661  default:
662  unk_pixfmt:
663  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
664  memset(s->upscale_h, 0, sizeof(s->upscale_h));
665  memset(s->upscale_v, 0, sizeof(s->upscale_v));
666  return AVERROR_PATCHWELCOME;
667  }
668  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
669  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
670  return AVERROR_PATCHWELCOME;
671  }
672  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
673  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
674  return AVERROR_PATCHWELCOME;
675  }
676  if (s->ls) {
677  memset(s->upscale_h, 0, sizeof(s->upscale_h));
678  memset(s->upscale_v, 0, sizeof(s->upscale_v));
679  if (s->nb_components == 3) {
680             s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
681         } else if (s->nb_components != 1) {
682  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
683  return AVERROR_PATCHWELCOME;
684  } else if (s->palette_index && s->bits <= 8)
685             s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
686         else if (s->bits <= 8)
687             s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
688         else
689             s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
690     }
691 
692     s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
693     if (!s->pix_desc) {
694  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
695  return AVERROR_BUG;
696  }
697 
698  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
699  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
700  } else {
701  enum AVPixelFormat pix_fmts[] = {
702 #if CONFIG_MJPEG_NVDEC_HWACCEL
703             AV_PIX_FMT_CUDA,
704 #endif
705 #if CONFIG_MJPEG_VAAPI_HWACCEL
706             AV_PIX_FMT_VAAPI,
707 #endif
708             s->avctx->pix_fmt,
709             AV_PIX_FMT_NONE,
710         };
711  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
712  if (s->hwaccel_pix_fmt < 0)
713  return AVERROR(EINVAL);
714 
715     s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
716     s->avctx->pix_fmt = s->hwaccel_pix_fmt;
717  }
718 
719  if (s->avctx->skip_frame == AVDISCARD_ALL) {
720         s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
721         s->picture_ptr->key_frame = 1;
722  s->got_picture = 1;
723  return 0;
724  }
725 
726     av_frame_unref(s->picture_ptr);
727     if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
728         return -1;
729     s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
730  s->picture_ptr->key_frame = 1;
731  s->got_picture = 1;
732 
733  for (i = 0; i < 4; i++)
734  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
735 
736  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
737  s->width, s->height, s->linesize[0], s->linesize[1],
738  s->interlaced, s->avctx->height);
739 
740  }
741 
742  if ((s->rgb && !s->lossless && !s->ls) ||
743  (!s->rgb && s->ls && s->nb_components > 1) ||
744  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
745  ) {
746  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
747  return AVERROR_PATCHWELCOME;
748  }
749 
750  /* totally blank picture as progressive JPEG will only add details to it */
751  if (s->progressive) {
752  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
753  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
754  for (i = 0; i < s->nb_components; i++) {
755  int size = bw * bh * s->h_count[i] * s->v_count[i];
756  av_freep(&s->blocks[i]);
757  av_freep(&s->last_nnz[i]);
758  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
759  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
760  if (!s->blocks[i] || !s->last_nnz[i])
761  return AVERROR(ENOMEM);
762  s->block_stride[i] = bw * s->h_count[i];
763  }
764  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
765  }
766 
767  if (s->avctx->hwaccel) {
768         s->hwaccel_picture_private =
769             av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
770         if (!s->hwaccel_picture_private)
771  return AVERROR(ENOMEM);
772 
773  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
774                                              s->raw_image_buffer_size);
775         if (ret < 0)
776  return ret;
777  }
778 
779  return 0;
780 }
781 
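/* Editorial note: a DC difference is coded as a Huffman "category" (the number
 * of magnitude bits) followed by that many raw bits; get_xbits() converts them
 * back to a signed value. 0xfffff is used as an in-band error marker by the
 * callers. */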
782 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
783 {
784  int code;
785  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
786  if (code < 0 || code > 16) {
787         av_log(s->avctx, AV_LOG_ERROR,
788                "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
789  0, dc_index, &s->vlcs[0][dc_index]);
790  return 0xfffff;
791  }
792 
793  if (code)
794  return get_xbits(&s->gb, code);
795  else
796  return 0;
797 }
798 
799 /* decode block and dequantize */
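/* Editorial note: baseline AC decoding — the high nibble of each VLC symbol is
 * a run of zero coefficients, the low nibble the magnitude category of the next
 * non-zero one. The value is sign-extended straight from the bit cache
 * (NEG_USR32) and dequantised into natural order via scantable.permutated[]. */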
800 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
801  int dc_index, int ac_index, uint16_t *quant_matrix)
802 {
803  int code, i, j, level, val;
804 
805  /* DC coef */
806  val = mjpeg_decode_dc(s, dc_index);
807  if (val == 0xfffff) {
808  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
809  return AVERROR_INVALIDDATA;
810  }
811  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
812  val = av_clip_int16(val);
813  s->last_dc[component] = val;
814  block[0] = val;
815  /* AC coefs */
816  i = 0;
817  {OPEN_READER(re, &s->gb);
818  do {
819  UPDATE_CACHE(re, &s->gb);
820  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
821 
822  i += ((unsigned)code) >> 4;
823  code &= 0xf;
824  if (code) {
825  if (code > MIN_CACHE_BITS - 16)
826  UPDATE_CACHE(re, &s->gb);
827 
828  {
829  int cache = GET_CACHE(re, &s->gb);
830  int sign = (~cache) >> 31;
831  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
832  }
833 
834  LAST_SKIP_BITS(re, &s->gb, code);
835 
836  if (i > 63) {
837  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
838  return AVERROR_INVALIDDATA;
839  }
840  j = s->scantable.permutated[i];
841  block[j] = level * quant_matrix[i];
842  }
843  } while (i < 63);
844  CLOSE_READER(re, &s->gb);}
845 
846  return 0;
847 }
848 
849 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
850                                  int component, int dc_index,
851  uint16_t *quant_matrix, int Al)
852 {
853  unsigned val;
854  s->bdsp.clear_block(block);
855  val = mjpeg_decode_dc(s, dc_index);
856  if (val == 0xfffff) {
857  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
858  return AVERROR_INVALIDDATA;
859  }
860  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
861  s->last_dc[component] = val;
862  block[0] = val;
863  return 0;
864 }
865 
866 /* decode block and dequantize - progressive JPEG version */
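/* Editorial note: first pass of a progressive AC scan — only the spectral band
 * [ss, se] is coded, values are scaled by 2^Al (successive approximation), and
 * an end-of-band run (EOBRUN) says how many following blocks have no further
 * non-zero coefficients in this band. */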
867 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
868                                     uint8_t *last_nnz, int ac_index,
869  uint16_t *quant_matrix,
870  int ss, int se, int Al, int *EOBRUN)
871 {
872  int code, i, j, val, run;
873  unsigned level;
874 
875  if (*EOBRUN) {
876  (*EOBRUN)--;
877  return 0;
878  }
879 
880  {
881  OPEN_READER(re, &s->gb);
882  for (i = ss; ; i++) {
883  UPDATE_CACHE(re, &s->gb);
884  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
885 
886  run = ((unsigned) code) >> 4;
887  code &= 0xF;
888  if (code) {
889  i += run;
890  if (code > MIN_CACHE_BITS - 16)
891  UPDATE_CACHE(re, &s->gb);
892 
893  {
894  int cache = GET_CACHE(re, &s->gb);
895  int sign = (~cache) >> 31;
896  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
897  }
898 
899  LAST_SKIP_BITS(re, &s->gb, code);
900 
901  if (i >= se) {
902  if (i == se) {
903  j = s->scantable.permutated[se];
904  block[j] = level * (quant_matrix[se] << Al);
905  break;
906  }
907  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
908  return AVERROR_INVALIDDATA;
909  }
910  j = s->scantable.permutated[i];
911  block[j] = level * (quant_matrix[i] << Al);
912  } else {
913  if (run == 0xF) {// ZRL - skip 15 coefficients
914  i += 15;
915  if (i >= se) {
916  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
917  return AVERROR_INVALIDDATA;
918  }
919  } else {
920  val = (1 << run);
921  if (run) {
922  UPDATE_CACHE(re, &s->gb);
923  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
924  LAST_SKIP_BITS(re, &s->gb, run);
925  }
926  *EOBRUN = val - 1;
927  break;
928  }
929  }
930  }
931  CLOSE_READER(re, &s->gb);
932  }
933 
934  if (i > *last_nnz)
935  *last_nnz = i;
936 
937  return 0;
938 }
939 
940 #define REFINE_BIT(j) { \
941  UPDATE_CACHE(re, &s->gb); \
942  sign = block[j] >> 15; \
943  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
944  ((quant_matrix[i] ^ sign) - sign) << Al; \
945  LAST_SKIP_BITS(re, &s->gb, 1); \
946 }
947 
948 #define ZERO_RUN \
949 for (; ; i++) { \
950  if (i > last) { \
951  i += run; \
952  if (i > se) { \
953  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
954  return -1; \
955  } \
956  break; \
957  } \
958  j = s->scantable.permutated[i]; \
959  if (block[j]) \
960  REFINE_BIT(j) \
961  else if (run-- == 0) \
962  break; \
963 }
964 
965 /* decode block and dequantize - progressive JPEG refinement pass */
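/* Editorial note: refinement pass — every already non-zero coefficient receives
 * one correction bit (REFINE_BIT), while zero runs and newly significant +/-1
 * coefficients, again scaled by 2^Al, are inserted in between (ZERO_RUN). */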
966 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
967                                    uint8_t *last_nnz,
968  int ac_index, uint16_t *quant_matrix,
969  int ss, int se, int Al, int *EOBRUN)
970 {
971  int code, i = ss, j, sign, val, run;
972  int last = FFMIN(se, *last_nnz);
973 
974  OPEN_READER(re, &s->gb);
975  if (*EOBRUN) {
976  (*EOBRUN)--;
977  } else {
978  for (; ; i++) {
979  UPDATE_CACHE(re, &s->gb);
980  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
981 
982  if (code & 0xF) {
983  run = ((unsigned) code) >> 4;
984  UPDATE_CACHE(re, &s->gb);
985  val = SHOW_UBITS(re, &s->gb, 1);
986  LAST_SKIP_BITS(re, &s->gb, 1);
987  ZERO_RUN;
988  j = s->scantable.permutated[i];
989  val--;
990  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
991  if (i == se) {
992  if (i > *last_nnz)
993  *last_nnz = i;
994  CLOSE_READER(re, &s->gb);
995  return 0;
996  }
997  } else {
998  run = ((unsigned) code) >> 4;
999  if (run == 0xF) {
1000  ZERO_RUN;
1001  } else {
1002  val = run;
1003  run = (1 << run);
1004  if (val) {
1005  UPDATE_CACHE(re, &s->gb);
1006  run += SHOW_UBITS(re, &s->gb, val);
1007  LAST_SKIP_BITS(re, &s->gb, val);
1008  }
1009  *EOBRUN = run - 1;
1010  break;
1011  }
1012  }
1013  }
1014 
1015  if (i > *last_nnz)
1016  *last_nnz = i;
1017  }
1018 
1019  for (; i <= last; i++) {
1020  j = s->scantable.permutated[i];
1021  if (block[j])
1022  REFINE_BIT(j)
1023  }
1024  CLOSE_READER(re, &s->gb);
1025 
1026  return 0;
1027 }
1028 #undef REFINE_BIT
1029 #undef ZERO_RUN
1030 
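/* Editorial note: restart-marker handling — when the restart interval expires
 * the DC predictors are reset and the bitstream is realigned on the RSTn
 * marker; stray 0xFF padding bytes are skipped in the process. */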
1031 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1032 {
1033  int i;
1034  int reset = 0;
1035 
1036  if (s->restart_interval) {
1037  s->restart_count--;
1038  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1039  align_get_bits(&s->gb);
1040  for (i = 0; i < nb_components; i++) /* reset dc */
1041  s->last_dc[i] = (4 << s->bits);
1042  }
1043 
1044  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1045  /* skip RSTn */
1046  if (s->restart_count == 0) {
1047  if( show_bits(&s->gb, i) == (1 << i) - 1
1048  || show_bits(&s->gb, i) == 0xFF) {
1049  int pos = get_bits_count(&s->gb);
1050  align_get_bits(&s->gb);
1051  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1052  skip_bits(&s->gb, 8);
1053  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1054  for (i = 0; i < nb_components; i++) /* reset dc */
1055  s->last_dc[i] = (4 << s->bits);
1056  reset = 1;
1057  } else
1058  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1059  }
1060  }
1061  }
1062  return reset;
1063 }
1064 
1065 /* Handles 1 to 4 components */
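/* Editorial note: lossless (SOF3) RGB/Bayer scan — each sample is predicted
 * from its left, top and top-left neighbours according to the predictor chosen
 * in SOS, and the Huffman-coded DC-style difference is added on top. One row of
 * previous values is kept in s->ljpeg_buffer. */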
1066 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1067 {
1068  int i, mb_x, mb_y;
1069  unsigned width;
1070  uint16_t (*buffer)[4];
1071  int left[4], top[4], topleft[4];
1072  const int linesize = s->linesize[0];
1073  const int mask = ((1 << s->bits) - 1) << point_transform;
1074  int resync_mb_y = 0;
1075  int resync_mb_x = 0;
1076  int vpred[6];
1077 
1078  if (!s->bayer && s->nb_components < 3)
1079  return AVERROR_INVALIDDATA;
1080  if (s->bayer && s->nb_components > 2)
1081  return AVERROR_INVALIDDATA;
1082  if (s->nb_components <= 0 || s->nb_components > 4)
1083  return AVERROR_INVALIDDATA;
1084  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1085  return AVERROR_INVALIDDATA;
1086 
1087 
1088     s->restart_count = s->restart_interval;
1089 
1090  if (s->restart_interval == 0)
1091  s->restart_interval = INT_MAX;
1092 
1093  if (s->bayer)
1094  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1095  else
1096  width = s->mb_width;
1097 
1098  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1099  if (!s->ljpeg_buffer)
1100  return AVERROR(ENOMEM);
1101 
1102  buffer = s->ljpeg_buffer;
1103 
1104  for (i = 0; i < 4; i++)
1105  buffer[0][i] = 1 << (s->bits - 1);
1106 
1107  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1108  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1109 
1110  if (s->interlaced && s->bottom_field)
1111  ptr += linesize >> 1;
1112 
1113  for (i = 0; i < 4; i++)
1114  top[i] = left[i] = topleft[i] = buffer[0][i];
1115 
1116  if ((mb_y * s->width) % s->restart_interval == 0) {
1117  for (i = 0; i < 6; i++)
1118  vpred[i] = 1 << (s->bits-1);
1119  }
1120 
1121  for (mb_x = 0; mb_x < width; mb_x++) {
1122  int modified_predictor = predictor;
1123 
1124  if (get_bits_left(&s->gb) < 1) {
1125  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1126  return AVERROR_INVALIDDATA;
1127  }
1128 
1129  if (s->restart_interval && !s->restart_count){
1130                 s->restart_count = s->restart_interval;
1131                 resync_mb_x = mb_x;
1132  resync_mb_y = mb_y;
1133  for(i=0; i<4; i++)
1134  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1135  }
1136  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1137  modified_predictor = 1;
1138 
1139  for (i=0;i<nb_components;i++) {
1140  int pred, dc;
1141 
1142  topleft[i] = top[i];
1143  top[i] = buffer[mb_x][i];
1144 
1145  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1146  if(dc == 0xFFFFF)
1147  return -1;
1148 
1149  if (!s->bayer || mb_x) {
1150  pred = left[i];
1151  } else { /* This path runs only for the first line in bayer images */
1152  vpred[i] += dc;
1153  pred = vpred[i] - dc;
1154  }
1155 
1156  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1157 
1158  left[i] = buffer[mb_x][i] =
1159  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1160  }
1161 
1162  if (s->restart_interval && !--s->restart_count) {
1163  align_get_bits(&s->gb);
1164  skip_bits(&s->gb, 16); /* skip RSTn */
1165  }
1166  }
1167  if (s->rct && s->nb_components == 4) {
1168  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1169  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1170  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1171  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1172  ptr[4*mb_x + 0] = buffer[mb_x][3];
1173  }
1174  } else if (s->nb_components == 4) {
1175  for(i=0; i<nb_components; i++) {
1176  int c= s->comp_index[i];
1177  if (s->bits <= 8) {
1178  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1179  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1180  }
1181  } else if(s->bits == 9) {
1182  return AVERROR_PATCHWELCOME;
1183  } else {
1184  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1185  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1186  }
1187  }
1188  }
1189  } else if (s->rct) {
1190  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1191  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1192  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1193  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1194  }
1195  } else if (s->pegasus_rct) {
1196  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1197  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1198  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1199  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1200  }
1201  } else if (s->bayer) {
1202  if (nb_components == 1) {
1203  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1204  for (mb_x = 0; mb_x < width; mb_x++)
1205  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1206  } else if (nb_components == 2) {
1207  for (mb_x = 0; mb_x < width; mb_x++) {
1208  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1209  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1210  }
1211  }
1212  } else {
1213  for(i=0; i<nb_components; i++) {
1214  int c= s->comp_index[i];
1215  if (s->bits <= 8) {
1216  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1217  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1218  }
1219  } else if(s->bits == 9) {
1220  return AVERROR_PATCHWELCOME;
1221  } else {
1222  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1223  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1224  }
1225  }
1226  }
1227  }
1228  }
1229  return 0;
1230 }
1231 
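/* Editorial note: lossless YUV scan — same predictive scheme as above, but
 * operating directly on the planar output; the first row and column, and MCUs
 * right after a restart point, fall back to simpler predictors. */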
1232 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1233                                  int point_transform, int nb_components)
1234 {
1235  int i, mb_x, mb_y, mask;
1236  int bits= (s->bits+7)&~7;
1237  int resync_mb_y = 0;
1238  int resync_mb_x = 0;
1239 
1240  point_transform += bits - s->bits;
1241  mask = ((1 << s->bits) - 1) << point_transform;
1242 
1243  av_assert0(nb_components>=1 && nb_components<=4);
1244 
1245  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1246  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1247  if (get_bits_left(&s->gb) < 1) {
1248  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1249  return AVERROR_INVALIDDATA;
1250  }
1251  if (s->restart_interval && !s->restart_count){
1252                 s->restart_count = s->restart_interval;
1253                 resync_mb_x = mb_x;
1254  resync_mb_y = mb_y;
1255  }
1256 
1257  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1258  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1259  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1260  for (i = 0; i < nb_components; i++) {
1261  uint8_t *ptr;
1262  uint16_t *ptr16;
1263  int n, h, v, x, y, c, j, linesize;
1264  n = s->nb_blocks[i];
1265  c = s->comp_index[i];
1266  h = s->h_scount[i];
1267  v = s->v_scount[i];
1268  x = 0;
1269  y = 0;
1270  linesize= s->linesize[c];
1271 
1272  if(bits>8) linesize /= 2;
1273 
1274  for(j=0; j<n; j++) {
1275  int pred, dc;
1276 
1277  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1278  if(dc == 0xFFFFF)
1279  return -1;
1280  if ( h * mb_x + x >= s->width
1281  || v * mb_y + y >= s->height) {
1282  // Nothing to do
1283  } else if (bits<=8) {
1284  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1285  if(y==0 && toprow){
1286  if(x==0 && leftcol){
1287  pred= 1 << (bits - 1);
1288  }else{
1289  pred= ptr[-1];
1290  }
1291  }else{
1292  if(x==0 && leftcol){
1293  pred= ptr[-linesize];
1294  }else{
1295  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1296  }
1297  }
1298 
1299  if (s->interlaced && s->bottom_field)
1300  ptr += linesize >> 1;
1301  pred &= mask;
1302  *ptr= pred + ((unsigned)dc << point_transform);
1303  }else{
1304  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1305  if(y==0 && toprow){
1306  if(x==0 && leftcol){
1307  pred= 1 << (bits - 1);
1308  }else{
1309  pred= ptr16[-1];
1310  }
1311  }else{
1312  if(x==0 && leftcol){
1313  pred= ptr16[-linesize];
1314  }else{
1315  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1316  }
1317  }
1318 
1319  if (s->interlaced && s->bottom_field)
1320  ptr16 += linesize >> 1;
1321  pred &= mask;
1322  *ptr16= pred + ((unsigned)dc << point_transform);
1323  }
1324  if (++x == h) {
1325  x = 0;
1326  y++;
1327  }
1328  }
1329  }
1330  } else {
1331  for (i = 0; i < nb_components; i++) {
1332  uint8_t *ptr;
1333  uint16_t *ptr16;
1334  int n, h, v, x, y, c, j, linesize, dc;
1335  n = s->nb_blocks[i];
1336  c = s->comp_index[i];
1337  h = s->h_scount[i];
1338  v = s->v_scount[i];
1339  x = 0;
1340  y = 0;
1341  linesize = s->linesize[c];
1342 
1343  if(bits>8) linesize /= 2;
1344 
1345  for (j = 0; j < n; j++) {
1346  int pred;
1347 
1348  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1349  if(dc == 0xFFFFF)
1350  return -1;
1351  if ( h * mb_x + x >= s->width
1352  || v * mb_y + y >= s->height) {
1353  // Nothing to do
1354  } else if (bits<=8) {
1355  ptr = s->picture_ptr->data[c] +
1356  (linesize * (v * mb_y + y)) +
1357  (h * mb_x + x); //FIXME optimize this crap
1358  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1359 
1360  pred &= mask;
1361  *ptr = pred + ((unsigned)dc << point_transform);
1362  }else{
1363  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1364  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1365 
1366  pred &= mask;
1367  *ptr16= pred + ((unsigned)dc << point_transform);
1368  }
1369 
1370  if (++x == h) {
1371  x = 0;
1372  y++;
1373  }
1374  }
1375  }
1376  }
1377  if (s->restart_interval && !--s->restart_count) {
1378  align_get_bits(&s->gb);
1379  skip_bits(&s->gb, 16); /* skip RSTn */
1380  }
1381  }
1382  }
1383  return 0;
1384 }
1385 
1386 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1387                                               uint8_t *dst, const uint8_t *src,
1388  int linesize, int lowres)
1389 {
1390  switch (lowres) {
1391  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1392  break;
1393  case 1: copy_block4(dst, src, linesize, linesize, 4);
1394  break;
1395  case 2: copy_block2(dst, src, linesize, linesize, 2);
1396  break;
1397  case 3: *dst = *src;
1398  break;
1399  }
1400 }
1401 
1402 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1403 {
1404  int block_x, block_y;
1405  int size = 8 >> s->avctx->lowres;
1406  if (s->bits > 8) {
1407  for (block_y=0; block_y<size; block_y++)
1408  for (block_x=0; block_x<size; block_x++)
1409  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1410  } else {
1411  for (block_y=0; block_y<size; block_y++)
1412  for (block_x=0; block_x<size; block_x++)
1413  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1414  }
1415 }
1416 
1417 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1418  int Al, const uint8_t *mb_bitmask,
1419  int mb_bitmask_size,
1420  const AVFrame *reference)
1421 {
1422  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1423     uint8_t *data[MAX_COMPONENTS];
1424     const uint8_t *reference_data[MAX_COMPONENTS];
1425  int linesize[MAX_COMPONENTS];
1426  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1427  int bytes_per_pixel = 1 + (s->bits > 8);
1428 
1429  if (mb_bitmask) {
1430  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1431  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1432  return AVERROR_INVALIDDATA;
1433  }
1434  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1435  }
1436 
1437  s->restart_count = 0;
1438 
1439  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1440  &chroma_v_shift);
1441  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1442  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1443 
1444  for (i = 0; i < nb_components; i++) {
1445  int c = s->comp_index[i];
1446  data[c] = s->picture_ptr->data[c];
1447  reference_data[c] = reference ? reference->data[c] : NULL;
1448  linesize[c] = s->linesize[c];
1449  s->coefs_finished[c] |= 1;
1450  }
1451 
1452  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1453  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1454  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1455 
1456  if (s->restart_interval && !s->restart_count)
1457                 s->restart_count = s->restart_interval;
1458 
1459  if (get_bits_left(&s->gb) < 0) {
1460  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1461  -get_bits_left(&s->gb));
1462  return AVERROR_INVALIDDATA;
1463  }
1464  for (i = 0; i < nb_components; i++) {
1465  uint8_t *ptr;
1466  int n, h, v, x, y, c, j;
1467  int block_offset;
1468  n = s->nb_blocks[i];
1469  c = s->comp_index[i];
1470  h = s->h_scount[i];
1471  v = s->v_scount[i];
1472  x = 0;
1473  y = 0;
1474  for (j = 0; j < n; j++) {
1475  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1476  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1477 
1478  if (s->interlaced && s->bottom_field)
1479  block_offset += linesize[c] >> 1;
1480  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1481  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1482  ptr = data[c] + block_offset;
1483  } else
1484  ptr = NULL;
1485  if (!s->progressive) {
1486  if (copy_mb) {
1487  if (ptr)
1488  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1489  linesize[c], s->avctx->lowres);
1490 
1491  } else {
1492  s->bdsp.clear_block(s->block);
1493  if (decode_block(s, s->block, i,
1494  s->dc_index[i], s->ac_index[i],
1495  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1496                         av_log(s->avctx, AV_LOG_ERROR,
1497                                "error y=%d x=%d\n", mb_y, mb_x);
1498  return AVERROR_INVALIDDATA;
1499  }
1500  if (ptr) {
1501  s->idsp.idct_put(ptr, linesize[c], s->block);
1502  if (s->bits & 7)
1503  shift_output(s, ptr, linesize[c]);
1504  }
1505  }
1506  } else {
1507  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1508  (h * mb_x + x);
1509  int16_t *block = s->blocks[c][block_idx];
1510  if (Ah)
1511  block[0] += get_bits1(&s->gb) *
1512  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1513  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1514  s->quant_matrixes[s->quant_sindex[i]],
1515  Al) < 0) {
1516                         av_log(s->avctx, AV_LOG_ERROR,
1517                                "error y=%d x=%d\n", mb_y, mb_x);
1518  return AVERROR_INVALIDDATA;
1519  }
1520  }
1521  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1522  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1523  mb_x, mb_y, x, y, c, s->bottom_field,
1524  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1525  if (++x == h) {
1526  x = 0;
1527  y++;
1528  }
1529  }
1530  }
1531 
1532  handle_rstn(s, nb_components);
1533  }
1534  }
1535  return 0;
1536 }
1537 
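/* Editorial note: a progressive AC scan covers a single component and a single
 * spectral band [ss, se]; s->coefs_finished[] records which bands have been
 * received so an incomplete file can still be reconstructed. */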
1538 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1539                                             int se, int Ah, int Al)
1540 {
1541  int mb_x, mb_y;
1542  int EOBRUN = 0;
1543  int c = s->comp_index[0];
1544  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1545 
1546  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1547  if (se < ss || se > 63) {
1548  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1549  return AVERROR_INVALIDDATA;
1550  }
1551 
1552  // s->coefs_finished is a bitmask for coefficients coded
1553  // ss and se are parameters telling start and end coefficients
1554  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1555 
1556  s->restart_count = 0;
1557 
1558  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1559  int block_idx = mb_y * s->block_stride[c];
1560  int16_t (*block)[64] = &s->blocks[c][block_idx];
1561  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1562  if (get_bits_left(&s->gb) <= 0) {
1563  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1564  return AVERROR_INVALIDDATA;
1565  }
1566  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1567  int ret;
1568  if (s->restart_interval && !s->restart_count)
1569                 s->restart_count = s->restart_interval;
1570 
1571  if (Ah)
1572  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1573  quant_matrix, ss, se, Al, &EOBRUN);
1574  else
1575  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1576  quant_matrix, ss, se, Al, &EOBRUN);
1577  if (ret < 0) {
1578                 av_log(s->avctx, AV_LOG_ERROR,
1579                        "error y=%d x=%d\n", mb_y, mb_x);
1580  return AVERROR_INVALIDDATA;
1581  }
1582 
1583  if (handle_rstn(s, 0))
1584  EOBRUN = 0;
1585  }
1586  }
1587  return 0;
1588 }
1589 
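/* Editorial note: once all scans of a progressive image have been parsed, the
 * accumulated coefficient blocks are pushed through the IDCT in one go. */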
1590 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1591 {
1592  int mb_x, mb_y;
1593  int c;
1594  const int bytes_per_pixel = 1 + (s->bits > 8);
1595  const int block_size = s->lossless ? 1 : 8;
1596 
1597  for (c = 0; c < s->nb_components; c++) {
1598  uint8_t *data = s->picture_ptr->data[c];
1599  int linesize = s->linesize[c];
1600  int h = s->h_max / s->h_count[c];
1601  int v = s->v_max / s->v_count[c];
1602  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1603  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1604 
1605  if (~s->coefs_finished[c])
1606  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1607 
1608  if (s->interlaced && s->bottom_field)
1609  data += linesize >> 1;
1610 
1611  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1612  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1613  int block_idx = mb_y * s->block_stride[c];
1614  int16_t (*block)[64] = &s->blocks[c][block_idx];
1615  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1616  s->idsp.idct_put(ptr, linesize, *block);
1617  if (s->bits & 7)
1618  shift_output(s, ptr, linesize);
1619  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1620  }
1621  }
1622  }
1623 }
1624 
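/* Editorial note: SOS lists the components taking part in this scan with their
 * DC/AC table ids, followed by Ss/Se/Ah/Al, which double as predictor and point
 * transform for lossless and JPEG-LS streams; it then dispatches to the
 * appropriate scan decoder (hwaccel, lossless, progressive or baseline). */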
1625 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1626                         int mb_bitmask_size, const AVFrame *reference)
1627 {
1628  int len, nb_components, i, h, v, predictor, point_transform;
1629  int index, id, ret;
1630  const int block_size = s->lossless ? 1 : 8;
1631  int ilv, prev_shift;
1632 
1633  if (!s->got_picture) {
1634         av_log(s->avctx, AV_LOG_WARNING,
1635                "Can not process SOS before SOF, skipping\n");
1636  return -1;
1637  }
1638 
1639  if (reference) {
1640  if (reference->width != s->picture_ptr->width ||
1641  reference->height != s->picture_ptr->height ||
1642  reference->format != s->picture_ptr->format) {
1643  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1644  return AVERROR_INVALIDDATA;
1645  }
1646  }
1647 
1648  /* XXX: verify len field validity */
1649  len = get_bits(&s->gb, 16);
1650  nb_components = get_bits(&s->gb, 8);
1651  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1652         avpriv_report_missing_feature(s->avctx,
1653                                       "decode_sos: nb_components (%d)",
1654  nb_components);
1655  return AVERROR_PATCHWELCOME;
1656  }
1657  if (len != 6 + 2 * nb_components) {
1658  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1659  return AVERROR_INVALIDDATA;
1660  }
1661  for (i = 0; i < nb_components; i++) {
1662  id = get_bits(&s->gb, 8) - 1;
1663  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1664  /* find component index */
1665  for (index = 0; index < s->nb_components; index++)
1666  if (id == s->component_id[index])
1667  break;
1668  if (index == s->nb_components) {
1669             av_log(s->avctx, AV_LOG_ERROR,
1670                    "decode_sos: index(%d) out of components\n", index);
1671  return AVERROR_INVALIDDATA;
1672  }
1673  /* Metasoft MJPEG codec has Cb and Cr swapped */
1674  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1675  && nb_components == 3 && s->nb_components == 3 && i)
1676  index = 3 - i;
1677 
1678  s->quant_sindex[i] = s->quant_index[index];
1679  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1680  s->h_scount[i] = s->h_count[index];
1681  s->v_scount[i] = s->v_count[index];
1682 
1683  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1684  index = (index+2)%3;
1685 
1686  s->comp_index[i] = index;
1687 
1688  s->dc_index[i] = get_bits(&s->gb, 4);
1689  s->ac_index[i] = get_bits(&s->gb, 4);
1690 
1691  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1692  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1693  goto out_of_range;
1694  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1695  goto out_of_range;
1696  }
1697 
1698  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1699  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1700  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1701  prev_shift = get_bits(&s->gb, 4); /* Ah */
1702  point_transform = get_bits(&s->gb, 4); /* Al */
1703  }else
1704  prev_shift = point_transform = 0;
1705 
1706  if (nb_components > 1) {
1707  /* interleaved stream */
1708  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1709  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1710  } else if (!s->ls) { /* skip this for JPEG-LS */
1711  h = s->h_max / s->h_scount[0];
1712  v = s->v_max / s->v_scount[0];
1713  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1714  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1715  s->nb_blocks[0] = 1;
1716  s->h_scount[0] = 1;
1717  s->v_scount[0] = 1;
1718  }
1719 
1720  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1721  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1722  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1723  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1724  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1725 
1726 
1727  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1728  for (i = s->mjpb_skiptosod; i > 0; i--)
1729  skip_bits(&s->gb, 8);
1730 
1731 next_field:
1732  for (i = 0; i < nb_components; i++)
1733  s->last_dc[i] = (4 << s->bits);
1734 
1735  if (s->avctx->hwaccel) {
1736  int bytes_to_start = get_bits_count(&s->gb) / 8;
1737  av_assert0(bytes_to_start >= 0 &&
1738  s->raw_scan_buffer_size >= bytes_to_start);
1739 
1740  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1741  s->raw_scan_buffer + bytes_to_start,
1742  s->raw_scan_buffer_size - bytes_to_start);
1743  if (ret < 0)
1744  return ret;
1745 
1746  } else if (s->lossless) {
1747  av_assert0(s->picture_ptr == s->picture);
1748  if (CONFIG_JPEGLS_DECODER && s->ls) {
1749 // for () {
1750 // reset_ls_coding_parameters(s, 0);
1751 
1752  if ((ret = ff_jpegls_decode_picture(s, predictor,
1753  point_transform, ilv)) < 0)
1754  return ret;
1755  } else {
1756  if (s->rgb || s->bayer) {
1757  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1758  return ret;
1759  } else {
1760  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1761  point_transform,
1762  nb_components)) < 0)
1763  return ret;
1764  }
1765  }
1766  } else {
1767  if (s->progressive && predictor) {
1768  av_assert0(s->picture_ptr == s->picture);
1769  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1770  ilv, prev_shift,
1771  point_transform)) < 0)
1772  return ret;
1773  } else {
1774  if ((ret = mjpeg_decode_scan(s, nb_components,
1775  prev_shift, point_transform,
1776  mb_bitmask, mb_bitmask_size, reference)) < 0)
1777  return ret;
1778  }
1779  }
1780 
1781  if (s->interlaced &&
1782  get_bits_left(&s->gb) > 32 &&
1783  show_bits(&s->gb, 8) == 0xFF) {
1784  GetBitContext bak = s->gb;
1785  align_get_bits(&bak);
1786  if (show_bits(&bak, 16) == 0xFFD1) {
1787  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1788  s->gb = bak;
1789  skip_bits(&s->gb, 16);
1790  s->bottom_field ^= 1;
1791 
1792  goto next_field;
1793  }
1794  }
1795 
1796  emms_c();
1797  return 0;
1798  out_of_range:
1799  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1800  return AVERROR_INVALIDDATA;
1801 }
1802 
1803 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1804 {
1805  if (get_bits(&s->gb, 16) != 4)
1806  return AVERROR_INVALIDDATA;
1807  s->restart_interval = get_bits(&s->gb, 16);
1808  s->restart_count = 0;
1809  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1810  s->restart_interval);
1811 
1812  return 0;
1813 }
1814 
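/* Editorial note: APPn parsing — recognises AVI1 (AVID field polarity), JFIF
 * (sample aspect ratio), Adobe (colour transform), Pegasus LJIF, colr/xfrm,
 * the _JPS stereo extension, EXIF metadata and multi-chunk ICC profiles;
 * everything else is skipped. */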
1815 static int mjpeg_decode_app(MJpegDecodeContext *s)
1816 {
1817  int len, id, i;
1818 
1819  len = get_bits(&s->gb, 16);
1820  if (len < 6) {
1821  if (s->bayer) {
1822  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1823  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1824  skip_bits(&s->gb, len);
1825  return 0;
1826  } else
1827  return AVERROR_INVALIDDATA;
1828  }
1829  if (8 * len > get_bits_left(&s->gb))
1830  return AVERROR_INVALIDDATA;
1831 
1832  id = get_bits_long(&s->gb, 32);
1833  len -= 6;
1834 
1835  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1836  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1837  av_fourcc2str(av_bswap32(id)), id, len);
1838 
1839  /* Buggy AVID, it puts EOI only at every 10th frame. */
1840  /* Also, this fourcc is used by non-avid files too, it holds some
1841  information, but it's always present in AVID-created files. */
1842  if (id == AV_RB32("AVI1")) {
1843  /* structure:
1844  4bytes AVI1
1845  1bytes polarity
1846  1bytes always zero
1847  4bytes field_size
1848  4bytes field_size_less_padding
1849  */
1850  s->buggy_avid = 1;
1851  i = get_bits(&s->gb, 8); len--;
1852  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1853  goto out;
1854  }
1855 
1856  if (id == AV_RB32("JFIF")) {
1857  int t_w, t_h, v1, v2;
1858  if (len < 8)
1859  goto out;
1860  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1861  v1 = get_bits(&s->gb, 8);
1862  v2 = get_bits(&s->gb, 8);
1863  skip_bits(&s->gb, 8);
1864 
1865  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1866  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1867  if ( s->avctx->sample_aspect_ratio.num <= 0
1868  || s->avctx->sample_aspect_ratio.den <= 0) {
1869  s->avctx->sample_aspect_ratio.num = 0;
1870  s->avctx->sample_aspect_ratio.den = 1;
1871  }
1872 
1873  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1874  av_log(s->avctx, AV_LOG_INFO,
1875  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1876  v1, v2,
1877                s->avctx->sample_aspect_ratio.num,
1878                s->avctx->sample_aspect_ratio.den);
1879 
1880  len -= 8;
1881  if (len >= 2) {
1882  t_w = get_bits(&s->gb, 8);
1883  t_h = get_bits(&s->gb, 8);
1884  if (t_w && t_h) {
1885  /* skip thumbnail */
1886  if (len -10 - (t_w * t_h * 3) > 0)
1887  len -= t_w * t_h * 3;
1888  }
1889  len -= 2;
1890  }
1891  goto out;
1892  }
1893 
1894  if ( id == AV_RB32("Adob")
1895  && len >= 7
1896  && show_bits(&s->gb, 8) == 'e'
1897  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1898  skip_bits(&s->gb, 8); /* 'e' */
1899  skip_bits(&s->gb, 16); /* version */
1900  skip_bits(&s->gb, 16); /* flags0 */
1901  skip_bits(&s->gb, 16); /* flags1 */
1902  s->adobe_transform = get_bits(&s->gb, 8);
1903  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1904  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1905  len -= 7;
1906  goto out;
1907  }
1908 
1909  if (id == AV_RB32("LJIF")) {
1910  int rgb = s->rgb;
1911  int pegasus_rct = s->pegasus_rct;
1912  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1913  av_log(s->avctx, AV_LOG_INFO,
1914  "Pegasus lossless jpeg header found\n");
1915  skip_bits(&s->gb, 16); /* version ? */
1916  skip_bits(&s->gb, 16); /* unknown always 0? */
1917  skip_bits(&s->gb, 16); /* unknown always 0? */
1918  skip_bits(&s->gb, 16); /* unknown always 0? */
1919  switch (i=get_bits(&s->gb, 8)) {
1920  case 1:
1921  rgb = 1;
1922  pegasus_rct = 0;
1923  break;
1924  case 2:
1925  rgb = 1;
1926  pegasus_rct = 1;
1927  break;
1928  default:
1929  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1930  }
1931 
1932  len -= 9;
1933  if (s->got_picture)
1934  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1935  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1936  goto out;
1937  }
1938 
1939  s->rgb = rgb;
1940  s->pegasus_rct = pegasus_rct;
1941 
1942  goto out;
1943  }
1944  if (id == AV_RL32("colr") && len > 0) {
1945  s->colr = get_bits(&s->gb, 8);
1946  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1947  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1948  len --;
1949  goto out;
1950  }
1951  if (id == AV_RL32("xfrm") && len > 0) {
1952  s->xfrm = get_bits(&s->gb, 8);
1953  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1954  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1955  len --;
1956  goto out;
1957  }
1958 
1959  /* JPS extension by VRex */
1960  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1961  int flags, layout, type;
1962  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1963  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1964 
1965  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1966  skip_bits(&s->gb, 16); len -= 2; /* block length */
1967  skip_bits(&s->gb, 8); /* reserved */
1968  flags = get_bits(&s->gb, 8);
1969  layout = get_bits(&s->gb, 8);
1970  type = get_bits(&s->gb, 8);
1971  len -= 4;
1972 
1973  av_freep(&s->stereo3d);
1974  s->stereo3d = av_stereo3d_alloc();
1975  if (!s->stereo3d) {
1976  goto out;
1977  }
1978  if (type == 0) {
1979  s->stereo3d->type = AV_STEREO3D_2D;
1980  } else if (type == 1) {
1981  switch (layout) {
1982  case 0x01:
1983  s->stereo3d->type = AV_STEREO3D_LINES;
1984  break;
1985  case 0x02:
1986  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1987  break;
1988  case 0x03:
1989  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1990  break;
1991  }
1992  if (!(flags & 0x04)) {
1993  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1994  }
1995  }
1996  goto out;
1997  }
1998 
1999  /* EXIF metadata */
2000  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2001  GetByteContext gbytes;
2002  int ret, le, ifd_offset, bytes_read;
2003  const uint8_t *aligned;
2004 
2005  skip_bits(&s->gb, 16); // skip padding
2006  len -= 2;
2007 
2008  // init byte wise reading
2009  aligned = align_get_bits(&s->gb);
2010  bytestream2_init(&gbytes, aligned, len);
2011 
2012  // read TIFF header
2013  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2014  if (ret) {
2015  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2016  } else {
2017  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2018 
2019  // read 0th IFD and store the metadata
2020  // (return values > 0 indicate the presence of subimage metadata)
2021  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2022  if (ret < 0) {
2023  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2024  }
2025  }
2026 
2027  bytes_read = bytestream2_tell(&gbytes);
2028  skip_bits(&s->gb, bytes_read << 3);
2029  len -= bytes_read;
2030 
2031  goto out;
2032  }
2033 
2034  /* Apple MJPEG-A */
2035  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2036  id = get_bits_long(&s->gb, 32);
2037  len -= 4;
2038  /* Apple MJPEG-A */
2039  if (id == AV_RB32("mjpg")) {
2040  /* structure:
2041  4bytes field size
2042  4bytes pad field size
2043  4bytes next off
2044  4bytes quant off
2045  4bytes huff off
2046  4bytes image off
2047  4bytes scan off
2048  4bytes data off
2049  */
2050  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2051  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2052  }
2053  }
2054 
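 /* ICC profiles arrive in APP2 segments tagged "ICC_PROFILE\0": after the
  * tag come a 1-based sequence number and the total number of markers,
  * followed by one slice of the profile data. The slices are collected
  * here and reassembled once all of them have been read. */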
2055  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2056  int id2;
2057  unsigned seqno;
2058  unsigned nummarkers;
2059 
2060  id = get_bits_long(&s->gb, 32);
2061  id2 = get_bits(&s->gb, 24);
2062  len -= 7;
2063  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2064  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2065  goto out;
2066  }
2067 
2068  skip_bits(&s->gb, 8);
2069  seqno = get_bits(&s->gb, 8);
2070  len -= 2;
2071  if (seqno == 0) {
2072  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2073  goto out;
2074  }
2075 
2076  nummarkers = get_bits(&s->gb, 8);
2077  len -= 1;
2078  if (nummarkers == 0) {
2079  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2080  goto out;
2081  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2082  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2083  goto out;
2084  } else if (seqno > nummarkers) {
2085  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2086  goto out;
2087  }
2088 
2089  /* Allocate if this is the first APP2 we've seen. */
2090  if (s->iccnum == 0) {
2091  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2092  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2093  return AVERROR(ENOMEM);
2094  }
2095  s->iccnum = nummarkers;
2096  }
2097 
2098  if (s->iccentries[seqno - 1].data) {
2099  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2100  goto out;
2101  }
2102 
2103  s->iccentries[seqno - 1].length = len;
2104  s->iccentries[seqno - 1].data = av_malloc(len);
2105  if (!s->iccentries[seqno - 1].data) {
2106  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2107  return AVERROR(ENOMEM);
2108  }
2109 
2110  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2111  skip_bits(&s->gb, len << 3);
2112  len = 0;
2113  s->iccread++;
2114 
2115  if (s->iccread > s->iccnum)
2116  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2117  }
2118 
2119 out:
2120  /* slow but needed for extreme adobe jpegs */
2121  if (len < 0)
2122  av_log(s->avctx, AV_LOG_ERROR,
2123  "mjpeg: error, decode_app parser read over the end\n");
2124  while (--len > 0)
2125  skip_bits(&s->gb, 8);
2126 
2127  return 0;
2128 }
2129 
2130 static int mjpeg_decode_com(MJpegDecodeContext *s)
2131 {
2132  int len = get_bits(&s->gb, 16);
2133  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2134  int i;
2135  char *cbuf = av_malloc(len - 1);
2136  if (!cbuf)
2137  return AVERROR(ENOMEM);
2138 
2139  for (i = 0; i < len - 2; i++)
2140  cbuf[i] = get_bits(&s->gb, 8);
2141  if (i > 0 && cbuf[i - 1] == '\n')
2142  cbuf[i - 1] = 0;
2143  else
2144  cbuf[i] = 0;
2145 
2146  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2147  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2148 
2149  /* buggy avid, it puts EOI only at every 10th frame */
2150  if (!strncmp(cbuf, "AVID", 4)) {
2151  parse_avid(s, cbuf, len);
2152  } else if (!strcmp(cbuf, "CS=ITU601"))
2153  s->cs_itu601 = 1;
2154  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2155  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2156  s->flipped = 1;
2157  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2158  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2159  s->multiscope = 2;
2160  }
2161 
2162  av_free(cbuf);
2163  }
2164 
2165  return 0;
2166 }
2167 
2168 /* return the 8 bit start code value and update the search
2169  state. Return -1 if no start code found */
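 /* e.g. for the byte sequence 0x12 0xFF 0xD8 ... this returns 0xD8 (SOI)
  * and leaves *pbuf_ptr pointing just past the marker byte. */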
2170 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2171 {
2172  const uint8_t *buf_ptr;
2173  unsigned int v, v2;
2174  int val;
2175  int skipped = 0;
2176 
2177  buf_ptr = *pbuf_ptr;
2178  while (buf_end - buf_ptr > 1) {
2179  v = *buf_ptr++;
2180  v2 = *buf_ptr;
2181  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2182  val = *buf_ptr++;
2183  goto found;
2184  }
2185  skipped++;
2186  }
2187  buf_ptr = buf_end;
2188  val = -1;
2189 found:
2190  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2191  *pbuf_ptr = buf_ptr;
2192  return val;
2193 }
2194 
2195 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2196  const uint8_t **buf_ptr, const uint8_t *buf_end,
2197  const uint8_t **unescaped_buf_ptr,
2198  int *unescaped_buf_size)
2199 {
2200  int start_code;
2201  start_code = find_marker(buf_ptr, buf_end);
2202 
2203  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2204  if (!s->buffer)
2205  return AVERROR(ENOMEM);
2206 
2207  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2208  if (start_code == SOS && !s->ls) {
2209  const uint8_t *src = *buf_ptr;
2210  const uint8_t *ptr = src;
2211  uint8_t *dst = s->buffer;
2212 
2213  #define copy_data_segment(skip) do { \
2214  ptrdiff_t length = (ptr - src) - (skip); \
2215  if (length > 0) { \
2216  memcpy(dst, src, length); \
2217  dst += length; \
2218  src = ptr; \
2219  } \
2220  } while (0)
2221 
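 /* JPEG byte stuffing: inside entropy-coded data a real 0xFF byte is
  * stored as 0xFF 0x00, and restart markers (0xFF 0xD0..0xD7) may be
  * embedded. The loop below drops the stuffing bytes, so e.g. the escaped
  * sequence FF 00 12 becomes FF 12 in s->buffer, and stops once it hits
  * the first real (non-restart) marker. */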
2222  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2223  ptr = buf_end;
2224  copy_data_segment(0);
2225  } else {
2226  while (ptr < buf_end) {
2227  uint8_t x = *(ptr++);
2228 
2229  if (x == 0xff) {
2230  ptrdiff_t skip = 0;
2231  while (ptr < buf_end && x == 0xff) {
2232  x = *(ptr++);
2233  skip++;
2234  }
2235 
2236  /* 0xFF, 0xFF, ... */
2237  if (skip > 1) {
2238  copy_data_segment(skip);
2239 
2240  /* decrement src as it is equal to ptr after the
2241  * copy_data_segment macro and we might want to
2242  * copy the current value of x later on */
2243  src--;
2244  }
2245 
2246  if (x < RST0 || x > RST7) {
2247  copy_data_segment(1);
2248  if (x)
2249  break;
2250  }
2251  }
2252  }
2253  if (src < ptr)
2254  copy_data_segment(0);
2255  }
2256  #undef copy_data_segment
2257 
2258  *unescaped_buf_ptr = s->buffer;
2259  *unescaped_buf_size = dst - s->buffer;
2260  memset(s->buffer + *unescaped_buf_size, 0,
2261  AV_INPUT_BUFFER_PADDING_SIZE);
2262 
2263  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2264  (buf_end - *buf_ptr) - (dst - s->buffer));
2265  } else if (start_code == SOS && s->ls) {
2266  const uint8_t *src = *buf_ptr;
2267  uint8_t *dst = s->buffer;
2268  int bit_count = 0;
2269  int t = 0, b = 0;
2270  PutBitContext pb;
2271 
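 /* JPEG-LS marker stuffing: in the entropy-coded data a 0xFF byte is
  * followed by a byte whose MSB is 0, of which only the low 7 bits are
  * payload, so the loop below re-packs each such byte as 7 bits and
  * drops one bit per escape. */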
2272  /* find marker */
2273  while (src + t < buf_end) {
2274  uint8_t x = src[t++];
2275  if (x == 0xff) {
2276  while ((src + t < buf_end) && x == 0xff)
2277  x = src[t++];
2278  if (x & 0x80) {
2279  t -= FFMIN(2, t);
2280  break;
2281  }
2282  }
2283  }
2284  bit_count = t * 8;
2285  init_put_bits(&pb, dst, t);
2286 
2287  /* unescape bitstream */
2288  while (b < t) {
2289  uint8_t x = src[b++];
2290  put_bits(&pb, 8, x);
2291  if (x == 0xFF && b < t) {
2292  x = src[b++];
2293  if (x & 0x80) {
2294  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2295  x &= 0x7f;
2296  }
2297  put_bits(&pb, 7, x);
2298  bit_count--;
2299  }
2300  }
2301  flush_put_bits(&pb);
2302 
2303  *unescaped_buf_ptr = dst;
2304  *unescaped_buf_size = (bit_count + 7) >> 3;
2305  memset(s->buffer + *unescaped_buf_size, 0,
2306  AV_INPUT_BUFFER_PADDING_SIZE);
2307  } else {
2308  *unescaped_buf_ptr = *buf_ptr;
2309  *unescaped_buf_size = buf_end - *buf_ptr;
2310  }
2311 
2312  return start_code;
2313 }
2314 
2315 static void reset_icc_profile(MJpegDecodeContext *s)
2316 {
2317  int i;
2318 
2319  if (s->iccentries) {
2320  for (i = 0; i < s->iccnum; i++)
2321  av_freep(&s->iccentries[i].data);
2322  av_freep(&s->iccentries);
2323  }
2324 
2325  s->iccread = 0;
2326  s->iccnum = 0;
2327 }
2328 
2329 // SMV JPEG just stacks several output frames into one JPEG picture
2330 // we handle that by setting up the cropping parameters appropriately
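 // e.g. with 4 frames per JPEG and avctx->height == 120, coded_height is
 // 480 and sub-frame n is exposed by crop_top = n * 120 and
 // crop_bottom = 480 - (n + 1) * 120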
2331 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2332 {
2333  MJpegDecodeContext *s = avctx->priv_data;
2334  int ret;
2335 
2336  if (s->smv_next_frame > 0) {
2337  av_assert0(s->smv_frame->buf[0]);
2338  av_frame_unref(frame);
2339  ret = av_frame_ref(frame, s->smv_frame);
2340  if (ret < 0)
2341  return ret;
2342  } else {
2343  av_assert0(frame->buf[0]);
2344  av_frame_unref(s->smv_frame);
2345  ret = av_frame_ref(s->smv_frame, frame);
2346  if (ret < 0)
2347  return ret;
2348  }
2349 
2350  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2351 
2352  frame->width = avctx->coded_width;
2353  frame->height = avctx->coded_height;
2354  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2355  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2356 
2357  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2358 
2359  if (s->smv_next_frame == 0)
2360  av_frame_unref(s->smv_frame);
2361 
2362  return 0;
2363 }
2364 
2365 static int mjpeg_get_packet(AVCodecContext *avctx)
2366 {
2367  MJpegDecodeContext *s = avctx->priv_data;
2368  int ret;
2369 
2370  av_packet_unref(s->pkt);
2371  ret = ff_decode_get_packet(avctx, s->pkt);
2372  if (ret < 0)
2373  return ret;
2374 
2375 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2376  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2377  avctx->codec_id == AV_CODEC_ID_AMV) {
2378  ret = ff_sp5x_process_packet(avctx, s->pkt);
2379  if (ret < 0)
2380  return ret;
2381  }
2382 #endif
2383 
2384  s->buf_size = s->pkt->size;
2385 
2386  return 0;
2387 }
2388 
2389 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2390 {
2391  MJpegDecodeContext *s = avctx->priv_data;
2392  const uint8_t *buf_end, *buf_ptr;
2393  const uint8_t *unescaped_buf_ptr;
2394  int hshift, vshift;
2395  int unescaped_buf_size;
2396  int start_code;
2397  int i, index;
2398  int ret = 0;
2399  int is16bit;
2400 
2401  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2402  return smv_process_frame(avctx, frame);
2403 
2404  av_dict_free(&s->exif_metadata);
2405  av_freep(&s->stereo3d);
2406  s->adobe_transform = -1;
2407 
2408  if (s->iccnum != 0)
2409  reset_icc_profile(s);
2410 
2411  ret = mjpeg_get_packet(avctx);
2412  if (ret < 0)
2413  return ret;
2414 
2415  buf_ptr = s->pkt->data;
2416  buf_end = s->pkt->data + s->pkt->size;
2417  while (buf_ptr < buf_end) {
2418  /* find start next marker */
2419  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2420  &unescaped_buf_ptr,
2421  &unescaped_buf_size);
2422  /* EOF */
2423  if (start_code < 0) {
2424  break;
2425  } else if (unescaped_buf_size > INT_MAX / 8) {
2426  av_log(avctx, AV_LOG_ERROR,
2427  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2428  start_code, unescaped_buf_size, s->pkt->size);
2429  return AVERROR_INVALIDDATA;
2430  }
2431  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2432  start_code, buf_end - buf_ptr);
2433 
2434  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2435 
2436  if (ret < 0) {
2437  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2438  goto fail;
2439  }
2440 
2441  s->start_code = start_code;
2442  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2443  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2444 
2445  /* process markers */
2446  if (start_code >= RST0 && start_code <= RST7) {
2447  av_log(avctx, AV_LOG_DEBUG,
2448  "restart marker: %d\n", start_code & 0x0f);
2449  /* APP fields */
2450  } else if (start_code >= APP0 && start_code <= APP15) {
2451  if ((ret = mjpeg_decode_app(s)) < 0)
2452  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2453  av_err2str(ret));
2454  /* Comment */
2455  } else if (start_code == COM) {
2456  ret = mjpeg_decode_com(s);
2457  if (ret < 0)
2458  return ret;
2459  } else if (start_code == DQT) {
2460  ret = ff_mjpeg_decode_dqt(s);
2461  if (ret < 0)
2462  return ret;
2463  }
2464 
2465  ret = -1;
2466 
2467  if (!CONFIG_JPEGLS_DECODER &&
2468  (start_code == SOF48 || start_code == LSE)) {
2469  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2470  return AVERROR(ENOSYS);
2471  }
2472 
2473  if (avctx->skip_frame == AVDISCARD_ALL) {
2474  switch(start_code) {
2475  case SOF0:
2476  case SOF1:
2477  case SOF2:
2478  case SOF3:
2479  case SOF48:
2480  case SOI:
2481  case SOS:
2482  case EOI:
2483  break;
2484  default:
2485  goto skip;
2486  }
2487  }
2488 
2489  switch (start_code) {
2490  case SOI:
2491  s->restart_interval = 0;
2492  s->restart_count = 0;
2493  s->raw_image_buffer = buf_ptr;
2494  s->raw_image_buffer_size = buf_end - buf_ptr;
2495  /* nothing to do on SOI */
2496  break;
2497  case DHT:
2498  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2499  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2500  goto fail;
2501  }
2502  break;
2503  case SOF0:
2504  case SOF1:
2505  if (start_code == SOF0)
2506  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2507  else
2508  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2509  s->lossless = 0;
2510  s->ls = 0;
2511  s->progressive = 0;
2512  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2513  goto fail;
2514  break;
2515  case SOF2:
2516  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2517  s->lossless = 0;
2518  s->ls = 0;
2519  s->progressive = 1;
2520  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2521  goto fail;
2522  break;
2523  case SOF3:
2524  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2525  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2526  s->lossless = 1;
2527  s->ls = 0;
2528  s->progressive = 0;
2529  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2530  goto fail;
2531  break;
2532  case SOF48:
2533  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2534  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2535  s->lossless = 1;
2536  s->ls = 1;
2537  s->progressive = 0;
2538  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2539  goto fail;
2540  break;
2541  case LSE:
2542  if (!CONFIG_JPEGLS_DECODER ||
2543  (ret = ff_jpegls_decode_lse(s)) < 0)
2544  goto fail;
2545  break;
2546  case EOI:
2547 eoi_parser:
2548  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2549  s->progressive && s->cur_scan && s->got_picture)
2550  mjpeg_idct_scan_progressive_ac(s);
2551  s->cur_scan = 0;
2552  if (!s->got_picture) {
2553  av_log(avctx, AV_LOG_WARNING,
2554  "Found EOI before any SOF, ignoring\n");
2555  break;
2556  }
2557  if (s->interlaced) {
2558  s->bottom_field ^= 1;
2559  /* if not bottom field, do not output image yet */
2560  if (s->bottom_field == !s->interlace_polarity)
2561  break;
2562  }
2563  if (avctx->skip_frame == AVDISCARD_ALL) {
2564  s->got_picture = 0;
2565  ret = AVERROR(EAGAIN);
2566  goto the_end_no_picture;
2567  }
2568  if (s->avctx->hwaccel) {
2569  ret = s->avctx->hwaccel->end_frame(s->avctx);
2570  if (ret < 0)
2571  return ret;
2572 
2573  av_freep(&s->hwaccel_picture_private);
2574  }
2575  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2576  return ret;
2577  s->got_picture = 0;
2578 
2579  frame->pkt_dts = s->pkt->dts;
2580 
2581  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2582  int qp = FFMAX3(s->qscale[0],
2583  s->qscale[1],
2584  s->qscale[2]);
2585 
2586  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2587  }
2588 
2589  goto the_end;
2590  case SOS:
2591  s->raw_scan_buffer = buf_ptr;
2592  s->raw_scan_buffer_size = buf_end - buf_ptr;
2593 
2594  s->cur_scan++;
2595  if (avctx->skip_frame == AVDISCARD_ALL) {
2596  skip_bits(&s->gb, get_bits_left(&s->gb));
2597  break;
2598  }
2599 
2600  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2601  (avctx->err_recognition & AV_EF_EXPLODE))
2602  goto fail;
2603  break;
2604  case DRI:
2605  if ((ret = mjpeg_decode_dri(s)) < 0)
2606  return ret;
2607  break;
2608  case SOF5:
2609  case SOF6:
2610  case SOF7:
2611  case SOF9:
2612  case SOF10:
2613  case SOF11:
2614  case SOF13:
2615  case SOF14:
2616  case SOF15:
2617  case JPG:
2618  av_log(avctx, AV_LOG_ERROR,
2619  "mjpeg: unsupported coding type (%x)\n", start_code);
2620  break;
2621  }
2622 
2623 skip:
2624  /* advance past the bytes consumed by the marker parser */
2625  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2626  av_log(avctx, AV_LOG_DEBUG,
2627  "marker parser used %d bytes (%d bits)\n",
2628  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2629  }
2630  if (s->got_picture && s->cur_scan) {
2631  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2632  goto eoi_parser;
2633  }
2634  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2635  return AVERROR_INVALIDDATA;
2636 fail:
2637  s->got_picture = 0;
2638  return ret;
2639 the_end:
2640 
2641  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2642 
2643  if (AV_RB32(s->upscale_h)) {
2644  int p;
2645  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2646  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2647  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2648  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2649  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2651  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2653  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2654  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2655  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2656  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2657  );
2658  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2659  if (ret)
2660  return ret;
2661 
2662  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2663  for (p = 0; p<s->nb_components; p++) {
2664  uint8_t *line = s->picture_ptr->data[p];
2665  int w = s->width;
2666  int h = s->height;
2667  if (!s->upscale_h[p])
2668  continue;
2669  if (p==1 || p==2) {
2670  w = AV_CEIL_RSHIFT(w, hshift);
2671  h = AV_CEIL_RSHIFT(h, vshift);
2672  }
2673  if (s->upscale_v[p] == 1)
2674  h = (h+1)>>1;
2675  av_assert0(w > 0);
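 /* The plane was decoded at reduced width; expand it in place from right
  * to left so source samples are not overwritten before they are read:
  * upscale_h == 1 doubles the width by averaging two neighbouring
  * samples, upscale_h == 2 triples it. */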
2676  for (i = 0; i < h; i++) {
2677  if (s->upscale_h[p] == 1) {
2678  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2679  else line[w - 1] = line[(w - 1) / 2];
2680  for (index = w - 2; index > 0; index--) {
2681  if (is16bit)
2682  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2683  else
2684  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2685  }
2686  } else if (s->upscale_h[p] == 2) {
2687  if (is16bit) {
2688  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2689  if (w > 1)
2690  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2691  } else {
2692  line[w - 1] = line[(w - 1) / 3];
2693  if (w > 1)
2694  line[w - 2] = line[w - 1];
2695  }
2696  for (index = w - 3; index > 0; index--) {
2697  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2698  }
2699  }
2700  line += s->linesize[p];
2701  }
2702  }
2703  }
2704  if (AV_RB32(s->upscale_v)) {
2705  int p;
2706  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2707  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2708  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2709  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2710  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2711  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2712  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2713  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2714  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2715  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2716  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2717  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2718  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2719  );
2720  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2721  if (ret)
2722  return ret;
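 /* Same idea vertically: the plane was decoded with fewer rows, so it is
  * expanded in place from the bottom row upwards, averaging the two
  * nearest decoded rows (or copying when both map to the same row). */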
2723 
2724  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2725  for (p = 0; p < s->nb_components; p++) {
2726  uint8_t *dst;
2727  int w = s->width;
2728  int h = s->height;
2729  if (!s->upscale_v[p])
2730  continue;
2731  if (p==1 || p==2) {
2732  w = AV_CEIL_RSHIFT(w, hshift);
2733  h = AV_CEIL_RSHIFT(h, vshift);
2734  }
2735  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2736  for (i = h - 1; i; i--) {
2737  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2738  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2739  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2740  memcpy(dst, src1, w);
2741  } else {
2742  for (index = 0; index < w; index++)
2743  dst[index] = (src1[index] + src2[index]) >> 1;
2744  }
2745  dst -= s->linesize[p];
2746  }
2747  }
2748  }
2749  if (s->flipped && !s->rgb) {
2750  int j;
2751  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2752  if (ret)
2753  return ret;
2754 
2755  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2756  for (index=0; index<s->nb_components; index++) {
2757  uint8_t *dst = s->picture_ptr->data[index];
2758  int w = s->picture_ptr->width;
2759  int h = s->picture_ptr->height;
2760  if(index && index<3){
2761  w = AV_CEIL_RSHIFT(w, hshift);
2762  h = AV_CEIL_RSHIFT(h, vshift);
2763  }
2764  if(dst){
2765  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2766  for (i=0; i<h/2; i++) {
2767  for (j=0; j<w; j++)
2768  FFSWAP(int, dst[j], dst2[j]);
2769  dst += s->picture_ptr->linesize[index];
2770  dst2 -= s->picture_ptr->linesize[index];
2771  }
2772  }
2773  }
2774  }
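 /* The two loops below post-process 4-component images according to the
  * transform value read from the Adobe APP14 marker above;
  * (v * 257) >> 16 is used as a cheap integer approximation of v / 255
  * when scaling by the fourth component. */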
2775  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2776  int w = s->picture_ptr->width;
2777  int h = s->picture_ptr->height;
2778  av_assert0(s->nb_components == 4);
2779  for (i=0; i<h; i++) {
2780  int j;
2781  uint8_t *dst[4];
2782  for (index=0; index<4; index++) {
2783  dst[index] = s->picture_ptr->data[index]
2784  + s->picture_ptr->linesize[index]*i;
2785  }
2786  for (j=0; j<w; j++) {
2787  int k = dst[3][j];
2788  int r = dst[0][j] * k;
2789  int g = dst[1][j] * k;
2790  int b = dst[2][j] * k;
2791  dst[0][j] = g*257 >> 16;
2792  dst[1][j] = b*257 >> 16;
2793  dst[2][j] = r*257 >> 16;
2794  dst[3][j] = 255;
2795  }
2796  }
2797  }
2798  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2799  int w = s->picture_ptr->width;
2800  int h = s->picture_ptr->height;
2801  av_assert0(s->nb_components == 4);
2802  for (i=0; i<h; i++) {
2803  int j;
2804  uint8_t *dst[4];
2805  for (index=0; index<4; index++) {
2806  dst[index] = s->picture_ptr->data[index]
2807  + s->picture_ptr->linesize[index]*i;
2808  }
2809  for (j=0; j<w; j++) {
2810  int k = dst[3][j];
2811  int r = (255 - dst[0][j]) * k;
2812  int g = (128 - dst[1][j]) * k;
2813  int b = (128 - dst[2][j]) * k;
2814  dst[0][j] = r*257 >> 16;
2815  dst[1][j] = (g*257 >> 16) + 128;
2816  dst[2][j] = (b*257 >> 16) + 128;
2817  dst[3][j] = 255;
2818  }
2819  }
2820  }
2821 
2822  if (s->stereo3d) {
2823  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2824  if (stereo) {
2825  stereo->type = s->stereo3d->type;
2826  stereo->flags = s->stereo3d->flags;
2827  }
2828  av_freep(&s->stereo3d);
2829  }
2830 
2831  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2832  AVFrameSideData *sd;
2833  size_t offset = 0;
2834  int total_size = 0;
2835  int i;
2836 
2837  /* Sum size of all parts. */
2838  for (i = 0; i < s->iccnum; i++)
2839  total_size += s->iccentries[i].length;
2840 
2841  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2842  if (!sd) {
2843  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2844  return AVERROR(ENOMEM);
2845  }
2846 
2847  /* Reassemble the parts, which are now in-order. */
2848  for (i = 0; i < s->iccnum; i++) {
2849  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2850  offset += s->iccentries[i].length;
2851  }
2852  }
2853 
2854  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2855  av_dict_free(&s->exif_metadata);
2856 
2857  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2858  ret = smv_process_frame(avctx, frame);
2859  if (ret < 0) {
2860  av_frame_unref(frame);
2861  return ret;
2862  }
2863  }
2864  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2865  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2866  avctx->coded_height > s->orig_height) {
2867  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2868  frame->crop_top = frame->height - avctx->height;
2869  }
2870 
2871  ret = 0;
2872 
2873 the_end_no_picture:
2874  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2875  buf_end - buf_ptr);
2876 
2877  return ret;
2878 }
2879 
2880 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2881  * even without having called ff_mjpeg_decode_init(). */
2882 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2883 {
2884  MJpegDecodeContext *s = avctx->priv_data;
2885  int i, j;
2886 
2887  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2888  av_log(avctx, AV_LOG_INFO, "Single field\n");
2889  }
2890 
2891  if (s->picture) {
2892  av_frame_free(&s->picture);
2893  s->picture_ptr = NULL;
2894  } else if (s->picture_ptr)
2895  av_frame_unref(s->picture_ptr);
2896 
2897  av_packet_free(&s->pkt);
2898 
2899  av_frame_free(&s->smv_frame);
2900 
2901  av_freep(&s->buffer);
2902  av_freep(&s->stereo3d);
2903  av_freep(&s->ljpeg_buffer);
2904  s->ljpeg_buffer_size = 0;
2905 
2906  for (i = 0; i < 3; i++) {
2907  for (j = 0; j < 4; j++)
2908  ff_free_vlc(&s->vlcs[i][j]);
2909  }
2910  for (i = 0; i < MAX_COMPONENTS; i++) {
2911  av_freep(&s->blocks[i]);
2912  av_freep(&s->last_nnz[i]);
2913  }
2914  av_dict_free(&s->exif_metadata);
2915 
2916  reset_icc_profile(s);
2917 
2918  av_freep(&s->hwaccel_picture_private);
2919 
2920  return 0;
2921 }
2922 
2923 static void decode_flush(AVCodecContext *avctx)
2924 {
2925  MJpegDecodeContext *s = avctx->priv_data;
2926  s->got_picture = 0;
2927 
2928  s->smv_next_frame = 0;
2929  av_frame_unref(s->smv_frame);
2930 }
2931 
2932 #if CONFIG_MJPEG_DECODER
2933 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2934 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2935 static const AVOption options[] = {
2936  { "extern_huff", "Use external huffman table.",
2937  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2938  { NULL },
2939 };
2940 
2941 static const AVClass mjpegdec_class = {
2942  .class_name = "MJPEG decoder",
2943  .item_name = av_default_item_name,
2944  .option = options,
2945  .version = LIBAVUTIL_VERSION_INT,
2946 };
2947 
2948 AVCodec ff_mjpeg_decoder = {
2949  .name = "mjpeg",
2950  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2951  .type = AVMEDIA_TYPE_VIDEO,
2952  .id = AV_CODEC_ID_MJPEG,
2953  .priv_data_size = sizeof(MJpegDecodeContext),
2954  .init = ff_mjpeg_decode_init,
2955  .close = ff_mjpeg_decode_end,
2956  .receive_frame = ff_mjpeg_receive_frame,
2957  .flush = decode_flush,
2958  .capabilities = AV_CODEC_CAP_DR1,
2959  .max_lowres = 3,
2960  .priv_class = &mjpegdec_class,
2964  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2965 #if CONFIG_MJPEG_NVDEC_HWACCEL
2966  HWACCEL_NVDEC(mjpeg),
2967 #endif
2968 #if CONFIG_MJPEG_VAAPI_HWACCEL
2969  HWACCEL_VAAPI(mjpeg),
2970 #endif
2971  NULL
2972  },
2973 };
2974 #endif
2975 #if CONFIG_THP_DECODER
2976 AVCodec ff_thp_decoder = {
2977  .name = "thp",
2978  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2979  .type = AVMEDIA_TYPE_VIDEO,
2980  .id = AV_CODEC_ID_THP,
2981  .priv_data_size = sizeof(MJpegDecodeContext),
2982  .init = ff_mjpeg_decode_init,
2983  .close = ff_mjpeg_decode_end,
2984  .receive_frame = ff_mjpeg_receive_frame,
2985  .flush = decode_flush,
2986  .capabilities = AV_CODEC_CAP_DR1,
2987  .max_lowres = 3,
2990 };
2991 #endif
2992 
2993 #if CONFIG_SMVJPEG_DECODER
2994 AVCodec ff_smvjpeg_decoder = {
2995  .name = "smvjpeg",
2996  .long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
2997  .type = AVMEDIA_TYPE_VIDEO,
2998  .id = AV_CODEC_ID_SMVJPEG,
2999  .priv_data_size = sizeof(MJpegDecodeContext),
3000  .init = ff_mjpeg_decode_init,
3001  .close = ff_mjpeg_decode_end,
3002  .receive_frame = ff_mjpeg_receive_frame,
3003  .flush = decode_flush,
3004  .capabilities = AV_CODEC_CAP_DR1,
3007 };
3008 #endif
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
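The "sign bit + mantissa with no MSB" wording refers to the usual JPEG/MPEG rule for turning the n raw bits of a DC difference back into a signed value. Below is a minimal stand-alone sketch of that rule; extend_dc_diff is a hypothetical helper for illustration only, not the actual bit-reader code.

#include <stdio.h>

/* Hypothetical helper illustrating the sign-extension rule: if the leading
 * bit of the n raw bits is set the value is positive and equal to the raw
 * bits, otherwise it is raw - (2^n - 1). */
static int extend_dc_diff(unsigned raw, int n)
{
    if (n == 0)
        return 0;
    if (raw & (1u << (n - 1)))
        return (int)raw;                 /* leading bit 1 -> positive */
    return (int)raw - ((1 << n) - 1);    /* leading bit 0 -> negative */
}

int main(void)
{
    printf("%d\n", extend_dc_diff(0x5, 3)); /* 101b ->  5 */
    printf("%d\n", extend_dc_diff(0x2, 3)); /* 010b -> -5 */
    return 0;
}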
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:561
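To make the LSB-first packing above concrete, here is a small stand-alone sketch; pack_fourcc is a hypothetical helper mirroring what FFmpeg's MKTAG macro does.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: pack four characters LSB first, so "ABCD" becomes
 * ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'. */
static uint32_t pack_fourcc(char a, char b, char c, char d)
{
    return (uint32_t)(uint8_t)a |
           ((uint32_t)(uint8_t)b << 8) |
           ((uint32_t)(uint8_t)c << 16) |
           ((uint32_t)(uint8_t)d << 24);
}

int main(void)
{
    printf("0x%08X\n", pack_fourcc('A', 'B', 'C', 'D')); /* prints 0x44434241 */
    return 0;
}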
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1893
uint8_t * data
Definition: frame.h:222
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
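The rounding formula quoted above can be shown on single samples; avg_round below is a hypothetical one-pixel sketch, not the hpeldsp implementation (which operates on whole blocks).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical one-pixel version of the rounded halfpel average
 * (a + b + 1) >> 1: the mean of two samples, rounded up on ties. */
static uint8_t avg_round(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);
}

int main(void)
{
    printf("%d\n", avg_round(10, 11)); /* 11: ties round up */
    printf("%d\n", avg_round(10, 12)); /* 11 */
    return 0;
}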
int extradata_size
Definition: avcodec.h:638
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
int length
Definition: mjpegdec.h:49
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:108
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:348
int coded_height
Definition: avcodec.h:724
Describe the class of an AVClass context structure.
Definition: log.h:67
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
static const AVProfile profiles[]
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
Definition: frame.c:727
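A hedged usage sketch of this public API, roughly the way a decoder attaches an ICC profile blob collected from APP2 markers to the output frame; attach_icc and its parameters are hypothetical names chosen for illustration, and linking against libavutil is assumed.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <libavutil/frame.h>
#include <libavutil/error.h>

/* Hypothetical helper: copy an ICC profile blob into newly allocated
 * frame side data via av_frame_new_side_data(). */
static int attach_icc(AVFrame *frame, const uint8_t *icc, size_t icc_len)
{
    AVFrameSideData *sd =
        av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, icc_len);
    if (!sd)
        return AVERROR(ENOMEM);
    memcpy(sd->data, icc, icc_len);
    return 0;
}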
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:782
int ac_index[MAX_COMPONENTS]
Definition: mjpegdec.h:100
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1164
Rational number (pair of numerator and denominator).
Definition: rational.h:58
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1066
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
static const SheerTable rgb[2]
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
Definition: mjpeg.h:45
uint64_t coefs_finished[MAX_COMPONENTS]
bitmask of which coefs have been completely decoded (progressive mode)
Definition: mjpegdec.h:116
Definition: mjpeg.h:48
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
enum AVPixelFormat hwaccel_pix_fmt
Definition: mjpegdec.h:164
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
uint8_t raw_huffman_values[2][4][256]
Definition: mjpegdec.h:161
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1538
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
#define MIN_CACHE_BITS
Definition: get_bits.h:128
Definition: mjpeg.h:47
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
JPEG-LS extension parameters.
Definition: mjpeg.h:104
#define flags(name, subs,...)
Definition: cbs_av1.c:561
size_t raw_scan_buffer_size
Definition: mjpegdec.h:158
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:56
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2528
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
uint8_t level
Definition: svq3.c:204
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1625
Narrow or limited range content.
Definition: pixfmt.h:569
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2500
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:117
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:849
Definition: mjpeg.h:94
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:427
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1232
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
const OptionDef options[]
Definition: ffmpeg_opt.c:3427
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:147
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: internal.h:61
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
ICCEntry * iccentries
Definition: mjpegdec.h:146
#define FF_DEBUG_QP
Definition: avcodec.h:1628
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range.
Definition: pixfmt.h:80
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:2183
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:66
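A hedged sketch of how the bitstream writer entries above fit together. It assumes compilation inside the FFmpeg source tree, where the internal header "put_bits.h" and its put_bits() function are available, as they are to mjpegdec.c.

#include <stdio.h>
#include <stdint.h>
#include "put_bits.h"   /* FFmpeg-internal header */

int main(void)
{
    uint8_t buf[16] = { 0 };
    PutBitContext pb;

    init_put_bits(&pb, buf, sizeof(buf)); /* attach the writer to buf */
    put_bits(&pb, 4, 0xA);                /* write nibble 1010 */
    put_bits(&pb, 4, 0x5);                /* write nibble 0101 */
    flush_put_bits(&pb);                  /* pad the tail with zero bits */

    printf("0x%02X\n", buf[0]);           /* prints 0xA5 */
    return 0;
}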
int den
Denominator.
Definition: rational.h:60
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
static int lowres
Definition: ffplay.c:336
const uint8_t * raw_scan_buffer
Definition: mjpegdec.h:157
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
AVCodecContext * avctx
Definition: mjpegdec.h:54
void * priv_data
Definition: avcodec.h:563
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
#define av_free(p)
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1631
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1402
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:470
int got_picture
we found a SOF and picture is valid, too.
Definition: mjpegdec.h:110
int len
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2548
int16_t(*[MAX_COMPONENTS] blocks)[64]
intermediate sums (progressive mode)
Definition: mjpegdec.h:114
AVFrame * picture
Definition: mjpegdec.h:108
void * hwaccel_picture_private
Definition: mjpegdec.h:165
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
The official guide to swscale for confused developers.
Definition: swscale.txt:2
Definition: mjpeg.h:50
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:396
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range.
Definition: pixfmt.h:258
int last_dc[MAX_COMPONENTS]
Definition: mjpegdec.h:107
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:54
#define REFINE_BIT(j)
Definition: mjpegdec.c:940
uint8_t upscale_h[4]
Definition: mjpegdec.h:77
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:368
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2331
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2923
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1227
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the beginning of the IFD (Image File Directory) in *ifd_offset.
Definition: tiff_common.c:261
int height
Definition: frame.h:376
FILE * out
Definition: movenc.c:54
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1193
#define av_always_inline
Definition: attributes.h:45
static const uint8_t start_code[]
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:608
Definition: mjpeg.h:82
#define FFSWAP(type, a, b)
Definition: common.h:108
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2195
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:103
MJPEG decoder.
#define MKTAG(a, b, c, d)
Definition: common.h:478
AVCodec ff_thp_decoder
Definition: mjpeg.h:61
enum AVCodecID id
AV_RL32
Definition: bytestream.h:91
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
Definition: rpzaenc.c:58
uint16_t quant_matrixes[4][64]
Definition: mjpegdec.h:64
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators...
Definition: codec.h:52
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:411
for(j=16;j >0;--j)
int i
Definition: input.c:407
#define FFMAX3(a, b, c)
Definition: common.h:104
GLuint buffer
Definition: opengl_enc.c:101
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
Definition: mjpeg.h:49
bitstream writer API