FFmpeg
mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/display.h"
36 #include "libavutil/emms.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/avassert.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "hwaccel_internal.h"
46 #include "hwconfig.h"
47 #include "idctdsp.h"
48 #include "internal.h"
49 #include "jpegtables.h"
50 #include "mjpeg.h"
51 #include "mjpegdec.h"
52 #include "jpeglsdec.h"
53 #include "profiles.h"
54 #include "put_bits.h"
55 #include "exif.h"
56 #include "bytestream.h"
57 #include "tiff_common.h"
58 
59 
60 static int init_default_huffman_tables(MJpegDecodeContext *s)
61 {
62  static const struct {
63  int class;
64  int index;
65  const uint8_t *bits;
66  const uint8_t *values;
67  int length;
68  } ht[] = {
69  { 0, 0, ff_mjpeg_bits_dc_luminance,
70  ff_mjpeg_val_dc, 12 },
71  { 0, 1, ff_mjpeg_bits_dc_chrominance,
72  ff_mjpeg_val_dc, 12 },
73  { 1, 0, ff_mjpeg_bits_ac_luminance,
74  ff_mjpeg_val_ac_luminance, 162 },
75  { 1, 1, ff_mjpeg_bits_ac_chrominance,
76  ff_mjpeg_val_ac_chrominance, 162 },
77  { 2, 0, ff_mjpeg_bits_ac_luminance,
78  ff_mjpeg_val_ac_luminance, 162 },
79  { 2, 1, ff_mjpeg_bits_ac_chrominance,
80  ff_mjpeg_val_ac_chrominance, 162 },
81  };
82  int i, ret;
83 
84  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
85  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
86  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
87  ht[i].bits, ht[i].values,
88  ht[i].class == 1, s->avctx);
89  if (ret < 0)
90  return ret;
91 
92  if (ht[i].class < 2) {
93  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
94  ht[i].bits + 1, 16);
95  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
96  ht[i].values, ht[i].length);
97  }
98  }
99 
100  return 0;
101 }
102 
103 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
104 {
105  s->buggy_avid = 1;
106  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
107  s->interlace_polarity = 1;
108  if (len > 14 && buf[12] == 2) /* 2 - PAL */
109  s->interlace_polarity = 0;
110  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
111  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
112 }
113 
114 static void init_idct(AVCodecContext *avctx)
115 {
116  MJpegDecodeContext *s = avctx->priv_data;
117 
118  ff_idctdsp_init(&s->idsp, avctx);
119  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
120  s->idsp.idct_permutation);
121 }
122 
123 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
124 {
125  MJpegDecodeContext *s = avctx->priv_data;
126  int ret;
127 
128  if (!s->picture_ptr) {
129  s->picture = av_frame_alloc();
130  if (!s->picture)
131  return AVERROR(ENOMEM);
132  s->picture_ptr = s->picture;
133  }
134 
135  s->avctx = avctx;
136  ff_blockdsp_init(&s->bdsp);
137  ff_hpeldsp_init(&s->hdsp, avctx->flags);
138  init_idct(avctx);
139  s->buffer_size = 0;
140  s->buffer = NULL;
141  s->start_code = -1;
142  s->first_picture = 1;
143  s->got_picture = 0;
144  s->orig_height = avctx->coded_height;
145  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
146  avctx->colorspace = AVCOL_SPC_BT470BG;
147  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
148 
149  if ((ret = init_default_huffman_tables(s)) < 0)
150  return ret;
151 
152  if (s->extern_huff) {
153  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
154  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
155  return ret;
156  if (ff_mjpeg_decode_dht(s)) {
157  av_log(avctx, AV_LOG_ERROR,
158  "error using external huffman table, switching back to internal\n");
159  if ((ret = init_default_huffman_tables(s)) < 0)
160  return ret;
161  }
162  }
163  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
164  s->interlace_polarity = 1; /* bottom field first */
165  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
166  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
167  if (avctx->codec_tag == AV_RL32("MJPG"))
168  s->interlace_polarity = 1;
169  }
170 
171  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
172  if (avctx->extradata_size >= 4)
173  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
174 
175  if (s->smv_frames_per_jpeg <= 0) {
176  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
177  return AVERROR_INVALIDDATA;
178  }
179 
180  s->smv_frame = av_frame_alloc();
181  if (!s->smv_frame)
182  return AVERROR(ENOMEM);
183  } else if (avctx->extradata_size > 8
184  && AV_RL32(avctx->extradata) == 0x2C
185  && AV_RL32(avctx->extradata+4) == 0x18) {
186  parse_avid(s, avctx->extradata, avctx->extradata_size);
187  }
188 
189  if (avctx->codec->id == AV_CODEC_ID_AMV)
190  s->flipped = 1;
191 
192  return 0;
193 }
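
/*
 * Illustration (not part of this file): how a caller reaches the init code
 * above through the public libavcodec API. A minimal sketch with reduced
 * error handling; decode_one_jpeg() is a hypothetical helper and the packet
 * is assumed to already hold one complete JPEG image.
 */
static int decode_one_jpeg(const uint8_t *buf, int size, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVPacket *pkt        = av_packet_alloc();
    int ret = AVERROR(ENOMEM);

    if (codec && ctx && pkt && (ret = avcodec_open2(ctx, codec, NULL)) >= 0) {
        pkt->data = (uint8_t *)buf;          /* not refcounted, caller owns buf */
        pkt->size = size;
        if ((ret = avcodec_send_packet(ctx, pkt)) >= 0)
            ret = avcodec_receive_frame(ctx, out);
    }

    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return ret;
}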
194 
195 
196 /* quantize tables */
197 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
198 {
199  int len, index, i;
200 
201  len = get_bits(&s->gb, 16) - 2;
202 
203  if (8*len > get_bits_left(&s->gb)) {
204  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
205  return AVERROR_INVALIDDATA;
206  }
207 
208  while (len >= 65) {
209  int pr = get_bits(&s->gb, 4);
210  if (pr > 1) {
211  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
212  return AVERROR_INVALIDDATA;
213  }
214  index = get_bits(&s->gb, 4);
215  if (index >= 4)
216  return -1;
217  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
218  /* read quant table */
219  for (i = 0; i < 64; i++) {
220  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
221  if (s->quant_matrixes[index][i] == 0) {
222  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
223  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
224  if (s->avctx->err_recognition & AV_EF_EXPLODE)
225  return AVERROR_INVALIDDATA;
226  }
227  }
228 
229  // XXX FIXME fine-tune, and perhaps add dc too
230  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
231  s->quant_matrixes[index][8]) >> 1;
232  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
233  index, s->qscale[index]);
234  len -= 1 + 64 * (1+pr);
235  }
236  return 0;
237 }
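
/*
 * For reference, a standalone sketch (illustrative helper, not the decoder's
 * own parser) of the DQT payload read above, following ITU-T T.81 B.2.4.1:
 * after the 16-bit segment length, each table is one Pq/Tq byte (precision /
 * destination id) followed by 64 quantizer values of 8 or 16 bits, stored in
 * zig-zag order.
 */
static int parse_dqt_payload(const uint8_t *p, int len, uint16_t tables[4][64])
{
    while (len > 0) {
        int pq = p[0] >> 4;                  /* 0: 8-bit values, 1: 16-bit */
        int tq = p[0] & 0xF;                 /* table destination id, 0..3 */
        int n  = 1 + 64 * (pq ? 2 : 1);
        if (pq > 1 || tq > 3 || len < n)
            return -1;
        for (int i = 0; i < 64; i++)
            tables[tq][i] = pq ? (p[1 + 2 * i] << 8) | p[2 + 2 * i]
                               : p[1 + i];
        p   += n;
        len -= n;
    }
    return 0;
}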
238 
239 /* decode huffman tables and build VLC decoders */
240 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
241 {
242  int len, index, i, class, n, v;
243  uint8_t bits_table[17];
244  uint8_t val_table[256];
245  int ret = 0;
246 
247  len = get_bits(&s->gb, 16) - 2;
248 
249  if (8*len > get_bits_left(&s->gb)) {
250  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
251  return AVERROR_INVALIDDATA;
252  }
253 
254  while (len > 0) {
255  if (len < 17)
256  return AVERROR_INVALIDDATA;
257  class = get_bits(&s->gb, 4);
258  if (class >= 2)
259  return AVERROR_INVALIDDATA;
260  index = get_bits(&s->gb, 4);
261  if (index >= 4)
262  return AVERROR_INVALIDDATA;
263  n = 0;
264  for (i = 1; i <= 16; i++) {
265  bits_table[i] = get_bits(&s->gb, 8);
266  n += bits_table[i];
267  }
268  len -= 17;
269  if (len < n || n > 256)
270  return AVERROR_INVALIDDATA;
271 
272  for (i = 0; i < n; i++) {
273  v = get_bits(&s->gb, 8);
274  val_table[i] = v;
275  }
276  len -= n;
277 
278  /* build VLC and flush previous vlc if present */
279  ff_vlc_free(&s->vlcs[class][index]);
280  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
281  class, index, n);
282  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
283  val_table, class > 0, s->avctx)) < 0)
284  return ret;
285 
286  if (class > 0) {
287  ff_vlc_free(&s->vlcs[2][index]);
288  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
289  val_table, 0, s->avctx)) < 0)
290  return ret;
291  }
292 
293  for (i = 0; i < 16; i++)
294  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
295  for (i = 0; i < 256; i++)
296  s->raw_huffman_values[class][index][i] = val_table[i];
297  }
298  return 0;
299 }
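
/*
 * The VLCs built above start from the canonical code assignment of ITU-T
 * T.81 Annex C: bits_table[1..16] says how many codes exist of each length,
 * and codes of equal length are consecutive. A sketch of that assignment
 * (illustrative helper only; ff_mjpeg_build_vlc() additionally turns the
 * codes into a fast lookup table):
 */
static int build_canonical_codes(const uint8_t bits[17],
                                 uint16_t codes[256], uint8_t lengths[256])
{
    int code = 0, k = 0;
    for (int len = 1; len <= 16; len++) {
        for (int i = 0; i < bits[len]; i++) {
            if (k >= 256)
                return -1;
            codes[k]   = code++;
            lengths[k] = len;
            k++;
        }
        code <<= 1;                          /* next length: append a 0 bit */
    }
    return k;                                /* pairs with the values[] array */
}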
300 
301 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
302 {
303  int len, nb_components, i, width, height, bits, ret, size_change;
304  unsigned pix_fmt_id;
305  int h_count[MAX_COMPONENTS] = { 0 };
306  int v_count[MAX_COMPONENTS] = { 0 };
307 
308  s->cur_scan = 0;
309  memset(s->upscale_h, 0, sizeof(s->upscale_h));
310  memset(s->upscale_v, 0, sizeof(s->upscale_v));
311 
312  len = get_bits(&s->gb, 16);
313  bits = get_bits(&s->gb, 8);
314 
315  if (bits > 16 || bits < 1) {
316  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
317  return AVERROR_INVALIDDATA;
318  }
319 
320  if (s->avctx->bits_per_raw_sample != bits) {
321  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
322  s->avctx->bits_per_raw_sample = bits;
323  init_idct(s->avctx);
324  }
325  if (s->pegasus_rct)
326  bits = 9;
327  if (bits == 9 && !s->pegasus_rct)
328  s->rct = 1; // FIXME ugly
329 
330  if(s->lossless && s->avctx->lowres){
331  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
332  return -1;
333  }
334 
335  height = get_bits(&s->gb, 16);
336  width = get_bits(&s->gb, 16);
337 
338  // HACK for odd_height.mov
339  if (s->interlaced && s->width == width && s->height == height + 1)
340  height= s->height;
341 
342  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
343  if (av_image_check_size(width, height, 0, s->avctx) < 0)
344  return AVERROR_INVALIDDATA;
345  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
346  return AVERROR_INVALIDDATA;
347 
348  nb_components = get_bits(&s->gb, 8);
349  if (nb_components <= 0 ||
350  nb_components > MAX_COMPONENTS)
351  return -1;
352  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
353  if (nb_components != s->nb_components) {
354  av_log(s->avctx, AV_LOG_ERROR,
355  "nb_components changing in interlaced picture\n");
356  return AVERROR_INVALIDDATA;
357  }
358  }
359  if (s->ls && !(bits <= 8 || nb_components == 1)) {
360  avpriv_report_missing_feature(s->avctx,
361  "JPEG-LS that is not <= 8 "
362  "bits/component or 16-bit gray");
363  return AVERROR_PATCHWELCOME;
364  }
365  if (len != 8 + 3 * nb_components) {
366  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
367  return AVERROR_INVALIDDATA;
368  }
369 
370  s->nb_components = nb_components;
371  s->h_max = 1;
372  s->v_max = 1;
373  for (i = 0; i < nb_components; i++) {
374  /* component id */
375  s->component_id[i] = get_bits(&s->gb, 8);
376  h_count[i] = get_bits(&s->gb, 4);
377  v_count[i] = get_bits(&s->gb, 4);
378  /* compute hmax and vmax (only used in interleaved case) */
379  if (h_count[i] > s->h_max)
380  s->h_max = h_count[i];
381  if (v_count[i] > s->v_max)
382  s->v_max = v_count[i];
383  s->quant_index[i] = get_bits(&s->gb, 8);
384  if (s->quant_index[i] >= 4) {
385  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
386  return AVERROR_INVALIDDATA;
387  }
388  if (!h_count[i] || !v_count[i]) {
389  av_log(s->avctx, AV_LOG_ERROR,
390  "Invalid sampling factor in component %d %d:%d\n",
391  i, h_count[i], v_count[i]);
392  return AVERROR_INVALIDDATA;
393  }
394 
395  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
396  i, h_count[i], v_count[i],
397  s->component_id[i], s->quant_index[i]);
398  }
399  if ( nb_components == 4
400  && s->component_id[0] == 'C'
401  && s->component_id[1] == 'M'
402  && s->component_id[2] == 'Y'
403  && s->component_id[3] == 'K')
404  s->adobe_transform = 0;
405 
406  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
407  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
408  return AVERROR_PATCHWELCOME;
409  }
410 
411  if (s->bayer) {
412  if (nb_components == 2) {
413  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
414  width stored in their SOF3 markers is the width of each one. We only output
415  a single component, therefore we need to adjust the output image width. We
416  handle the deinterleaving (but not the debayering) in this file. */
417  width *= 2;
418  }
419  /* They can also contain 1 component, which is double the width and half the height
420  of the final image (rows are interleaved). We don't handle the decoding in this
421  file, but leave that to the TIFF/DNG decoder. */
422  }
423 
424  /* if different size, realloc/alloc picture */
425  if (width != s->width || height != s->height || bits != s->bits ||
426  memcmp(s->h_count, h_count, sizeof(h_count)) ||
427  memcmp(s->v_count, v_count, sizeof(v_count))) {
428  size_change = 1;
429 
430  s->width = width;
431  s->height = height;
432  s->bits = bits;
433  memcpy(s->h_count, h_count, sizeof(h_count));
434  memcpy(s->v_count, v_count, sizeof(v_count));
435  s->interlaced = 0;
436  s->got_picture = 0;
437 
438  /* test interlaced mode */
439  if (s->first_picture &&
440  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
441  s->orig_height != 0 &&
442  s->height < ((s->orig_height * 3) / 4)) {
443  s->interlaced = 1;
444  s->bottom_field = s->interlace_polarity;
445  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
446  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
447  height *= 2;
448  }
449 
450  ret = ff_set_dimensions(s->avctx, width, height);
451  if (ret < 0)
452  return ret;
453 
454  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
455  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
456  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
457  s->orig_height < height)
458  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
459 
460  s->first_picture = 0;
461  } else {
462  size_change = 0;
463  }
464 
465  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
466  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
467  if (s->avctx->height <= 0)
468  return AVERROR_INVALIDDATA;
469  }
470 
471  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
472  if (s->progressive) {
473  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
474  return AVERROR_INVALIDDATA;
475  }
476  } else {
477  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
478  s->rgb = 1;
479  else if (!s->lossless)
480  s->rgb = 0;
481  /* XXX: not complete test ! */
482  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
483  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
484  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
485  (s->h_count[3] << 4) | s->v_count[3];
486  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
487  /* NOTE we do not allocate pictures large enough for the possible
488  * padding of h/v_count being 4 */
489  if (!(pix_fmt_id & 0xD0D0D0D0))
490  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
491  if (!(pix_fmt_id & 0x0D0D0D0D))
492  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
493 
494  for (i = 0; i < 8; i++) {
495  int j = 6 + (i&1) - (i&6);
496  int is = (pix_fmt_id >> (4*i)) & 0xF;
497  int js = (pix_fmt_id >> (4*j)) & 0xF;
498 
499  if (is == 1 && js != 2 && (i < 2 || i > 5))
500  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
501  if (is == 1 && js != 2 && (i < 2 || i > 5))
502  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
503 
504  if (is == 1 && js == 2) {
505  if (i & 1) s->upscale_h[j/2] = 1;
506  else s->upscale_v[j/2] = 1;
507  }
508  }
509 
510  if (s->bayer) {
511  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
512  goto unk_pixfmt;
513  }
514 
515  switch (pix_fmt_id) {
516  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
517  if (!s->bayer)
518  goto unk_pixfmt;
519  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
520  break;
521  case 0x11111100:
522  if (s->rgb)
523  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
524  else {
525  if ( s->adobe_transform == 0
526  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
527  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
528  } else {
529  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
530  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
531  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
532  }
533  }
534  av_assert0(s->nb_components == 3);
535  break;
536  case 0x11111111:
537  if (s->rgb)
538  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
539  else {
540  if (s->adobe_transform == 0 && s->bits <= 8) {
541  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
542  } else {
543  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
544  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
545  }
546  }
547  av_assert0(s->nb_components == 4);
548  break;
549  case 0x11412100:
550  if (s->bits > 8)
551  goto unk_pixfmt;
552  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
553  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
554  s->upscale_h[0] = 4;
555  s->upscale_h[1] = 0;
556  s->upscale_h[2] = 1;
557  } else {
558  goto unk_pixfmt;
559  }
560  break;
561  case 0x22111122:
562  case 0x22111111:
563  if (s->adobe_transform == 0 && s->bits <= 8) {
564  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
565  s->upscale_v[1] = s->upscale_v[2] = 1;
566  s->upscale_h[1] = s->upscale_h[2] = 1;
567  } else if (s->adobe_transform == 2 && s->bits <= 8) {
568  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
569  s->upscale_v[1] = s->upscale_v[2] = 1;
570  s->upscale_h[1] = s->upscale_h[2] = 1;
571  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
572  } else {
573  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
574  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
575  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
576  }
577  av_assert0(s->nb_components == 4);
578  break;
579  case 0x12121100:
580  case 0x22122100:
581  case 0x21211100:
582  case 0x21112100:
583  case 0x22211200:
584  case 0x22221100:
585  case 0x22112200:
586  case 0x11222200:
587  if (s->bits > 8)
588  goto unk_pixfmt;
589  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
590  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
591  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
592  } else {
593  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
594  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
595  }
596  break;
597  case 0x11000000:
598  case 0x13000000:
599  case 0x14000000:
600  case 0x31000000:
601  case 0x33000000:
602  case 0x34000000:
603  case 0x41000000:
604  case 0x43000000:
605  case 0x44000000:
606  if(s->bits <= 8)
607  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
608  else
609  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
610  break;
611  case 0x12111100:
612  case 0x14121200:
613  case 0x14111100:
614  case 0x22211100:
615  case 0x22112100:
616  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
617  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
618  else
619  goto unk_pixfmt;
620  s->upscale_v[1] = s->upscale_v[2] = 1;
621  } else {
622  if (pix_fmt_id == 0x14111100)
623  s->upscale_v[1] = s->upscale_v[2] = 1;
624  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
625  else
626  goto unk_pixfmt;
627  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
628  }
629  break;
630  case 0x21111100:
631  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
632  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
633  else
634  goto unk_pixfmt;
635  s->upscale_h[1] = s->upscale_h[2] = 1;
636  } else {
637  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
638  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
639  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
640  }
641  break;
642  case 0x11311100:
643  if (s->bits > 8)
644  goto unk_pixfmt;
645  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
646  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
647  else
648  goto unk_pixfmt;
649  s->upscale_h[0] = s->upscale_h[2] = 2;
650  break;
651  case 0x31111100:
652  if (s->bits > 8)
653  goto unk_pixfmt;
654  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
655  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
656  s->upscale_h[1] = s->upscale_h[2] = 2;
657  break;
658  case 0x22121100:
659  case 0x22111200:
660  case 0x41211100:
661  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
662  else
663  goto unk_pixfmt;
664  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
665  break;
666  case 0x22111100:
667  case 0x23111100:
668  case 0x42111100:
669  case 0x24111100:
670  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
671  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
672  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
673  if (pix_fmt_id == 0x42111100) {
674  if (s->bits > 8)
675  goto unk_pixfmt;
676  s->upscale_h[1] = s->upscale_h[2] = 1;
677  } else if (pix_fmt_id == 0x24111100) {
678  if (s->bits > 8)
679  goto unk_pixfmt;
680  s->upscale_v[1] = s->upscale_v[2] = 1;
681  } else if (pix_fmt_id == 0x23111100) {
682  if (s->bits > 8)
683  goto unk_pixfmt;
684  s->upscale_v[1] = s->upscale_v[2] = 2;
685  }
686  break;
687  case 0x41111100:
688  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
689  else
690  goto unk_pixfmt;
691  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
692  break;
693  default:
694  unk_pixfmt:
695  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
696  memset(s->upscale_h, 0, sizeof(s->upscale_h));
697  memset(s->upscale_v, 0, sizeof(s->upscale_v));
698  return AVERROR_PATCHWELCOME;
699  }
700  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
701  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
702  return AVERROR_PATCHWELCOME;
703  }
704  if (s->ls) {
705  memset(s->upscale_h, 0, sizeof(s->upscale_h));
706  memset(s->upscale_v, 0, sizeof(s->upscale_v));
707  if (s->nb_components == 3) {
708  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
709  } else if (s->nb_components != 1) {
710  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
711  return AVERROR_PATCHWELCOME;
712  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
713  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
714  else if (s->bits <= 8)
715  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
716  else
717  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
718  }
719 
720  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
721  if (!s->pix_desc) {
722  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
723  return AVERROR_BUG;
724  }
725 
726  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
727  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
728  } else {
729  enum AVPixelFormat pix_fmts[] = {
730 #if CONFIG_MJPEG_NVDEC_HWACCEL
731  AV_PIX_FMT_CUDA,
732 #endif
733 #if CONFIG_MJPEG_VAAPI_HWACCEL
734  AV_PIX_FMT_VAAPI,
735 #endif
736  s->avctx->pix_fmt,
737  AV_PIX_FMT_NONE,
738  };
739  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
740  if (s->hwaccel_pix_fmt < 0)
741  return AVERROR(EINVAL);
742 
743  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
744  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
745  }
746 
747  if (s->avctx->skip_frame == AVDISCARD_ALL) {
748  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
749  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
750  s->got_picture = 1;
751  return 0;
752  }
753 
754  av_frame_unref(s->picture_ptr);
755  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
756  return -1;
757  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
758  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
759  s->got_picture = 1;
760 
761  // Let's clear the palette to avoid leaving uninitialized values in it
762  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
763  memset(s->picture_ptr->data[1], 0, 1024);
764 
765  for (i = 0; i < 4; i++)
766  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
767 
768  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
769  s->width, s->height, s->linesize[0], s->linesize[1],
770  s->interlaced, s->avctx->height);
771 
772  }
773 
774  if ((s->rgb && !s->lossless && !s->ls) ||
775  (!s->rgb && s->ls && s->nb_components > 1) ||
776  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
777  ) {
778  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
779  return AVERROR_PATCHWELCOME;
780  }
781 
782  /* totally blank picture as progressive JPEG will only add details to it */
783  if (s->progressive) {
784  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
785  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
786  for (i = 0; i < s->nb_components; i++) {
787  int size = bw * bh * s->h_count[i] * s->v_count[i];
788  av_freep(&s->blocks[i]);
789  av_freep(&s->last_nnz[i]);
790  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
791  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
792  if (!s->blocks[i] || !s->last_nnz[i])
793  return AVERROR(ENOMEM);
794  s->block_stride[i] = bw * s->h_count[i];
795  }
796  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
797  }
798 
799  if (s->avctx->hwaccel) {
800  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
801  s->hwaccel_picture_private =
802  av_mallocz(hwaccel->frame_priv_data_size);
803  if (!s->hwaccel_picture_private)
804  return AVERROR(ENOMEM);
805 
806  ret = hwaccel->start_frame(s->avctx, s->raw_image_buffer,
807  s->raw_image_buffer_size);
808  if (ret < 0)
809  return ret;
810  }
811 
812  return 0;
813 }
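
/*
 * Reference for the switch above: pix_fmt_id packs one nibble per sampling
 * factor, component 0 in the top byte, before the normalisation tweaks
 * applied earlier in the function. A sketch (hypothetical helper):
 * 2x2,1x1,1x1 (4:2:0) gives 0x22111100; 2x1,1x1,1x1 (4:2:2) gives 0x21111100.
 */
static unsigned pack_pix_fmt_id(const int h[4], const int v[4])
{
    return ((unsigned)h[0] << 28) | ((unsigned)v[0] << 24) |
           ((unsigned)h[1] << 20) | ((unsigned)v[1] << 16) |
           ((unsigned)h[2] << 12) | ((unsigned)v[2] <<  8) |
           ((unsigned)h[3] <<  4) |  (unsigned)v[3];
}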
814 
815 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
816 {
817  int code;
818  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
819  if (code < 0 || code > 16) {
820  av_log(s->avctx, AV_LOG_WARNING,
821  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
822  0, dc_index, &s->vlcs[0][dc_index]);
823  return 0xfffff;
824  }
825 
826  if (code)
827  return get_xbits(&s->gb, code);
828  else
829  return 0;
830 }
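
/*
 * What get_xbits() effectively computes for the DC difference above, in the
 * terms of ITU-T T.81 F.2.2.1 (EXTEND): the VLC yields a magnitude category
 * SSSS, then SSSS raw bits select the signed value. Sketch, assuming the raw
 * bits were already read into v:
 */
static int jpeg_extend(int v, int ssss)
{
    if (ssss == 0)
        return 0;
    if (v < (1 << (ssss - 1)))               /* leading bit 0 -> negative half */
        v -= (1 << ssss) - 1;
    return v;                                /* magnitude in [2^(SSSS-1), 2^SSSS - 1] */
}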
831 
832 /* decode block and dequantize */
833 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
834  int dc_index, int ac_index, uint16_t *quant_matrix)
835 {
836  int code, i, j, level, val;
837 
838  /* DC coef */
839  val = mjpeg_decode_dc(s, dc_index);
840  if (val == 0xfffff) {
841  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
842  return AVERROR_INVALIDDATA;
843  }
844  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
845  val = av_clip_int16(val);
846  s->last_dc[component] = val;
847  block[0] = val;
848  /* AC coefs */
849  i = 0;
850  {OPEN_READER(re, &s->gb);
851  do {
852  UPDATE_CACHE(re, &s->gb);
853  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
854 
855  i += ((unsigned)code) >> 4;
856  code &= 0xf;
857  if (code) {
858  if (code > MIN_CACHE_BITS - 16)
859  UPDATE_CACHE(re, &s->gb);
860 
861  {
862  int cache = GET_CACHE(re, &s->gb);
863  int sign = (~cache) >> 31;
864  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
865  }
866 
867  LAST_SKIP_BITS(re, &s->gb, code);
868 
869  if (i > 63) {
870  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
871  return AVERROR_INVALIDDATA;
872  }
873  j = s->permutated_scantable[i];
874  block[j] = level * quant_matrix[i];
875  }
876  } while (i < 63);
877  CLOSE_READER(re, &s->gb);}
878 
879  return 0;
880 }
881 
882 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
883  int component, int dc_index,
884  uint16_t *quant_matrix, int Al)
885 {
886  unsigned val;
887  s->bdsp.clear_block(block);
888  val = mjpeg_decode_dc(s, dc_index);
889  if (val == 0xfffff) {
890  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
891  return AVERROR_INVALIDDATA;
892  }
893  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
894  s->last_dc[component] = val;
895  block[0] = val;
896  return 0;
897 }
898 
899 /* decode block and dequantize - progressive JPEG version */
900 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
901  uint8_t *last_nnz, int ac_index,
902  uint16_t *quant_matrix,
903  int ss, int se, int Al, int *EOBRUN)
904 {
905  int code, i, j, val, run;
906  unsigned level;
907 
908  if (*EOBRUN) {
909  (*EOBRUN)--;
910  return 0;
911  }
912 
913  {
914  OPEN_READER(re, &s->gb);
915  for (i = ss; ; i++) {
916  UPDATE_CACHE(re, &s->gb);
917  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
918 
919  run = ((unsigned) code) >> 4;
920  code &= 0xF;
921  if (code) {
922  i += run;
923  if (code > MIN_CACHE_BITS - 16)
924  UPDATE_CACHE(re, &s->gb);
925 
926  {
927  int cache = GET_CACHE(re, &s->gb);
928  int sign = (~cache) >> 31;
929  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
930  }
931 
932  LAST_SKIP_BITS(re, &s->gb, code);
933 
934  if (i >= se) {
935  if (i == se) {
936  j = s->permutated_scantable[se];
937  block[j] = level * (quant_matrix[se] << Al);
938  break;
939  }
940  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
941  return AVERROR_INVALIDDATA;
942  }
943  j = s->permutated_scantable[i];
944  block[j] = level * (quant_matrix[i] << Al);
945  } else {
946  if (run == 0xF) {// ZRL - skip 15 coefficients
947  i += 15;
948  if (i >= se) {
949  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
950  return AVERROR_INVALIDDATA;
951  }
952  } else {
953  val = (1 << run);
954  if (run) {
955  UPDATE_CACHE(re, &s->gb);
956  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
957  LAST_SKIP_BITS(re, &s->gb, run);
958  }
959  *EOBRUN = val - 1;
960  break;
961  }
962  }
963  }
964  CLOSE_READER(re, &s->gb);
965  }
966 
967  if (i > *last_nnz)
968  *last_nnz = i;
969 
970  return 0;
971 }
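
/*
 * How the EOBn symbols handled above turn into an end-of-band run (ITU-T
 * T.81 G.1.2.2): an AC symbol with nibbles (r, 0) and r < 15 announces that
 * 2^r blocks, plus r extra bits, end their band here; the current block is
 * one of them, so EOBRUN stores the remainder. Sketch with the extra bits
 * already read:
 */
static int eob_run_length(int r, int extra_bits)
{
    return (1 << r) + extra_bits - 1;        /* blocks still to skip */
}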
972 
973 #define REFINE_BIT(j) { \
974  UPDATE_CACHE(re, &s->gb); \
975  sign = block[j] >> 15; \
976  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
977  ((quant_matrix[i] ^ sign) - sign) << Al; \
978  LAST_SKIP_BITS(re, &s->gb, 1); \
979 }
980 
981 #define ZERO_RUN \
982 for (; ; i++) { \
983  if (i > last) { \
984  i += run; \
985  if (i > se) { \
986  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
987  return -1; \
988  } \
989  break; \
990  } \
991  j = s->permutated_scantable[i]; \
992  if (block[j]) \
993  REFINE_BIT(j) \
994  else if (run-- == 0) \
995  break; \
996 }
997 
998 /* decode block and dequantize - progressive JPEG refinement pass */
999 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
1000  uint8_t *last_nnz,
1001  int ac_index, uint16_t *quant_matrix,
1002  int ss, int se, int Al, int *EOBRUN)
1003 {
1004  int code, i = ss, j, sign, val, run;
1005  int last = FFMIN(se, *last_nnz);
1006 
1007  OPEN_READER(re, &s->gb);
1008  if (*EOBRUN) {
1009  (*EOBRUN)--;
1010  } else {
1011  for (; ; i++) {
1012  UPDATE_CACHE(re, &s->gb);
1013  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1014 
1015  if (code & 0xF) {
1016  run = ((unsigned) code) >> 4;
1017  UPDATE_CACHE(re, &s->gb);
1018  val = SHOW_UBITS(re, &s->gb, 1);
1019  LAST_SKIP_BITS(re, &s->gb, 1);
1020  ZERO_RUN;
1021  j = s->permutated_scantable[i];
1022  val--;
1023  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1024  if (i == se) {
1025  if (i > *last_nnz)
1026  *last_nnz = i;
1027  CLOSE_READER(re, &s->gb);
1028  return 0;
1029  }
1030  } else {
1031  run = ((unsigned) code) >> 4;
1032  if (run == 0xF) {
1033  ZERO_RUN;
1034  } else {
1035  val = run;
1036  run = (1 << run);
1037  if (val) {
1038  UPDATE_CACHE(re, &s->gb);
1039  run += SHOW_UBITS(re, &s->gb, val);
1040  LAST_SKIP_BITS(re, &s->gb, val);
1041  }
1042  *EOBRUN = run - 1;
1043  break;
1044  }
1045  }
1046  }
1047 
1048  if (i > *last_nnz)
1049  *last_nnz = i;
1050  }
1051 
1052  for (; i <= last; i++) {
1053  j = s->permutated_scantable[i];
1054  if (block[j])
1055  REFINE_BIT(j)
1056  }
1057  CLOSE_READER(re, &s->gb);
1058 
1059  return 0;
1060 }
1061 #undef REFINE_BIT
1062 #undef ZERO_RUN
1063 
1064 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1065 {
1066  int i;
1067  int reset = 0;
1068 
1069  if (s->restart_interval) {
1070  s->restart_count--;
1071  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1072  align_get_bits(&s->gb);
1073  for (i = 0; i < nb_components; i++) /* reset dc */
1074  s->last_dc[i] = (4 << s->bits);
1075  }
1076 
1077  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1078  /* skip RSTn */
1079  if (s->restart_count == 0) {
1080  if( show_bits(&s->gb, i) == (1 << i) - 1
1081  || show_bits(&s->gb, i) == 0xFF) {
1082  int pos = get_bits_count(&s->gb);
1083  align_get_bits(&s->gb);
1084  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1085  skip_bits(&s->gb, 8);
1086  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1087  for (i = 0; i < nb_components; i++) /* reset dc */
1088  s->last_dc[i] = (4 << s->bits);
1089  reset = 1;
1090  } else
1091  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1092  }
1093  }
1094  }
1095  return reset;
1096 }
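
/*
 * Byte-level view of the resynchronisation above: restart markers are the
 * two-byte sequences 0xFF 0xD0..0xD7, inserted every restart_interval MCUs,
 * and each one resets the DC predictors. Inside entropy-coded data a 0xFF is
 * always followed by a 0x00 stuffing byte, so a bare 0xFF 0xDn pair is a real
 * marker. Sketch (hypothetical helper) that locates the next one:
 */
static int find_next_rst(const uint8_t *buf, int size, int *marker_index)
{
    for (int i = 0; i + 1 < size; i++) {
        if (buf[i] == 0xFF && (buf[i + 1] & 0xF8) == 0xD0) {
            *marker_index = buf[i + 1] & 7;  /* RST0..RST7 cycle modulo 8 */
            return i;
        }
    }
    return -1;
}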
1097 
1098 /* Handles 1 to 4 components */
1099 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1100 {
1101  int i, mb_x, mb_y;
1102  unsigned width;
1103  uint16_t (*buffer)[4];
1104  int left[4], top[4], topleft[4];
1105  const int linesize = s->linesize[0];
1106  const int mask = ((1 << s->bits) - 1) << point_transform;
1107  int resync_mb_y = 0;
1108  int resync_mb_x = 0;
1109  int vpred[6];
1110 
1111  if (!s->bayer && s->nb_components < 3)
1112  return AVERROR_INVALIDDATA;
1113  if (s->bayer && s->nb_components > 2)
1114  return AVERROR_INVALIDDATA;
1115  if (s->nb_components <= 0 || s->nb_components > 4)
1116  return AVERROR_INVALIDDATA;
1117  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1118  return AVERROR_INVALIDDATA;
1119  if (s->bayer) {
1120  if (s->rct || s->pegasus_rct)
1121  return AVERROR_INVALIDDATA;
1122  }
1123 
1124 
1125  s->restart_count = s->restart_interval;
1126 
1127  if (s->restart_interval == 0)
1128  s->restart_interval = INT_MAX;
1129 
1130  if (s->bayer)
1131  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1132  else
1133  width = s->mb_width;
1134 
1135  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1136  if (!s->ljpeg_buffer)
1137  return AVERROR(ENOMEM);
1138 
1139  buffer = s->ljpeg_buffer;
1140 
1141  for (i = 0; i < 4; i++)
1142  buffer[0][i] = 1 << (s->bits - 1);
1143 
1144  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1145  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1146 
1147  if (s->interlaced && s->bottom_field)
1148  ptr += linesize >> 1;
1149 
1150  for (i = 0; i < 4; i++)
1151  top[i] = left[i] = topleft[i] = buffer[0][i];
1152 
1153  if ((mb_y * s->width) % s->restart_interval == 0) {
1154  for (i = 0; i < 6; i++)
1155  vpred[i] = 1 << (s->bits-1);
1156  }
1157 
1158  for (mb_x = 0; mb_x < width; mb_x++) {
1159  int modified_predictor = predictor;
1160 
1161  if (get_bits_left(&s->gb) < 1) {
1162  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1163  return AVERROR_INVALIDDATA;
1164  }
1165 
1166  if (s->restart_interval && !s->restart_count){
1167  s->restart_count = s->restart_interval;
1168  resync_mb_x = mb_x;
1169  resync_mb_y = mb_y;
1170  for(i=0; i<4; i++)
1171  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1172  }
1173  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1174  modified_predictor = 1;
1175 
1176  for (i=0;i<nb_components;i++) {
1177  int pred, dc;
1178 
1179  topleft[i] = top[i];
1180  top[i] = buffer[mb_x][i];
1181 
1182  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1183  if(dc == 0xFFFFF)
1184  return -1;
1185 
1186  if (!s->bayer || mb_x) {
1187  pred = left[i];
1188  } else { /* This path runs only for the first line in bayer images */
1189  vpred[i] += dc;
1190  pred = vpred[i] - dc;
1191  }
1192 
1193  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1194 
1195  left[i] = buffer[mb_x][i] =
1196  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1197  }
1198 
1199  if (s->restart_interval && !--s->restart_count) {
1200  align_get_bits(&s->gb);
1201  skip_bits(&s->gb, 16); /* skip RSTn */
1202  }
1203  }
1204  if (s->rct && s->nb_components == 4) {
1205  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1206  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1207  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1208  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1209  ptr[4*mb_x + 0] = buffer[mb_x][3];
1210  }
1211  } else if (s->nb_components == 4) {
1212  for(i=0; i<nb_components; i++) {
1213  int c= s->comp_index[i];
1214  if (s->bits <= 8) {
1215  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1216  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1217  }
1218  } else if(s->bits == 9) {
1219  return AVERROR_PATCHWELCOME;
1220  } else {
1221  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1222  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1223  }
1224  }
1225  }
1226  } else if (s->rct) {
1227  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1228  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1229  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1230  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1231  }
1232  } else if (s->pegasus_rct) {
1233  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1234  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1235  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1236  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1237  }
1238  } else if (s->bayer) {
1239  if (s->bits <= 8)
1240  return AVERROR_PATCHWELCOME;
1241  if (nb_components == 1) {
1242  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1243  for (mb_x = 0; mb_x < width; mb_x++)
1244  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1245  } else if (nb_components == 2) {
1246  for (mb_x = 0; mb_x < width; mb_x++) {
1247  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1248  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1249  }
1250  }
1251  } else {
1252  for(i=0; i<nb_components; i++) {
1253  int c= s->comp_index[i];
1254  if (s->bits <= 8) {
1255  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1256  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1257  }
1258  } else if(s->bits == 9) {
1259  return AVERROR_PATCHWELCOME;
1260  } else {
1261  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1262  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1263  }
1264  }
1265  }
1266  }
1267  }
1268  return 0;
1269 }
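
/*
 * The PREDICT() used by the lossless scans selects one of the seven
 * predictors of ITU-T T.81 Table H.1, with a = sample to the left, b = sample
 * above, c = sample above-left. Plain-function sketch (illustrative only):
 */
static int ljpeg_predict(int a, int b, int c, int predictor)
{
    switch (predictor) {
    case 1:  return a;
    case 2:  return b;
    case 3:  return c;
    case 4:  return a + b - c;
    case 5:  return a + ((b - c) >> 1);
    case 6:  return b + ((a - c) >> 1);
    case 7:  return (a + b) >> 1;
    default: return 0;   /* predictor 0: no prediction (hierarchical mode only) */
    }
}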
1270 
1271 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1272  int point_transform, int nb_components)
1273 {
1274  int i, mb_x, mb_y, mask;
1275  int bits= (s->bits+7)&~7;
1276  int resync_mb_y = 0;
1277  int resync_mb_x = 0;
1278 
1279  point_transform += bits - s->bits;
1280  mask = ((1 << s->bits) - 1) << point_transform;
1281 
1282  av_assert0(nb_components>=1 && nb_components<=4);
1283 
1284  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1285  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1286  if (get_bits_left(&s->gb) < 1) {
1287  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1288  return AVERROR_INVALIDDATA;
1289  }
1290  if (s->restart_interval && !s->restart_count){
1291  s->restart_count = s->restart_interval;
1292  resync_mb_x = mb_x;
1293  resync_mb_y = mb_y;
1294  }
1295 
1296  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1297  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1298  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1299  for (i = 0; i < nb_components; i++) {
1300  uint8_t *ptr;
1301  uint16_t *ptr16;
1302  int n, h, v, x, y, c, j, linesize;
1303  n = s->nb_blocks[i];
1304  c = s->comp_index[i];
1305  h = s->h_scount[i];
1306  v = s->v_scount[i];
1307  x = 0;
1308  y = 0;
1309  linesize= s->linesize[c];
1310 
1311  if(bits>8) linesize /= 2;
1312 
1313  for(j=0; j<n; j++) {
1314  int pred, dc;
1315 
1316  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1317  if(dc == 0xFFFFF)
1318  return -1;
1319  if ( h * mb_x + x >= s->width
1320  || v * mb_y + y >= s->height) {
1321  // Nothing to do
1322  } else if (bits<=8) {
1323  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1324  if(y==0 && toprow){
1325  if(x==0 && leftcol){
1326  pred= 1 << (bits - 1);
1327  }else{
1328  pred= ptr[-1];
1329  }
1330  }else{
1331  if(x==0 && leftcol){
1332  pred= ptr[-linesize];
1333  }else{
1334  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1335  }
1336  }
1337 
1338  if (s->interlaced && s->bottom_field)
1339  ptr += linesize >> 1;
1340  pred &= mask;
1341  *ptr= pred + ((unsigned)dc << point_transform);
1342  }else{
1343  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1344  if(y==0 && toprow){
1345  if(x==0 && leftcol){
1346  pred= 1 << (bits - 1);
1347  }else{
1348  pred= ptr16[-1];
1349  }
1350  }else{
1351  if(x==0 && leftcol){
1352  pred= ptr16[-linesize];
1353  }else{
1354  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1355  }
1356  }
1357 
1358  if (s->interlaced && s->bottom_field)
1359  ptr16 += linesize >> 1;
1360  pred &= mask;
1361  *ptr16= pred + ((unsigned)dc << point_transform);
1362  }
1363  if (++x == h) {
1364  x = 0;
1365  y++;
1366  }
1367  }
1368  }
1369  } else {
1370  for (i = 0; i < nb_components; i++) {
1371  uint8_t *ptr;
1372  uint16_t *ptr16;
1373  int n, h, v, x, y, c, j, linesize, dc;
1374  n = s->nb_blocks[i];
1375  c = s->comp_index[i];
1376  h = s->h_scount[i];
1377  v = s->v_scount[i];
1378  x = 0;
1379  y = 0;
1380  linesize = s->linesize[c];
1381 
1382  if(bits>8) linesize /= 2;
1383 
1384  for (j = 0; j < n; j++) {
1385  int pred;
1386 
1387  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1388  if(dc == 0xFFFFF)
1389  return -1;
1390  if ( h * mb_x + x >= s->width
1391  || v * mb_y + y >= s->height) {
1392  // Nothing to do
1393  } else if (bits<=8) {
1394  ptr = s->picture_ptr->data[c] +
1395  (linesize * (v * mb_y + y)) +
1396  (h * mb_x + x); //FIXME optimize this crap
1397  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1398 
1399  pred &= mask;
1400  *ptr = pred + ((unsigned)dc << point_transform);
1401  }else{
1402  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1403  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1404 
1405  pred &= mask;
1406  *ptr16= pred + ((unsigned)dc << point_transform);
1407  }
1408 
1409  if (++x == h) {
1410  x = 0;
1411  y++;
1412  }
1413  }
1414  }
1415  }
1416  if (s->restart_interval && !--s->restart_count) {
1417  align_get_bits(&s->gb);
1418  skip_bits(&s->gb, 16); /* skip RSTn */
1419  }
1420  }
1421  }
1422  return 0;
1423 }
1424 
1425 static void mjpeg_copy_block(MJpegDecodeContext *s,
1426  uint8_t *dst, const uint8_t *src,
1427  int linesize, int lowres)
1428 {
1429  switch (lowres) {
1430  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1431  break;
1432  case 1: copy_block4(dst, src, linesize, linesize, 4);
1433  break;
1434  case 2: copy_block2(dst, src, linesize, linesize, 2);
1435  break;
1436  case 3: *dst = *src;
1437  break;
1438  }
1439 }
1440 
1441 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1442 {
1443  int block_x, block_y;
1444  int size = 8 >> s->avctx->lowres;
1445  if (s->bits > 8) {
1446  for (block_y=0; block_y<size; block_y++)
1447  for (block_x=0; block_x<size; block_x++)
1448  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1449  } else {
1450  for (block_y=0; block_y<size; block_y++)
1451  for (block_x=0; block_x<size; block_x++)
1452  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1453  }
1454 }
1455 
1456 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1457  int Al, const uint8_t *mb_bitmask,
1458  int mb_bitmask_size,
1459  const AVFrame *reference)
1460 {
1461  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1462  uint8_t *data[MAX_COMPONENTS];
1463  const uint8_t *reference_data[MAX_COMPONENTS];
1464  int linesize[MAX_COMPONENTS];
1465  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1466  int bytes_per_pixel = 1 + (s->bits > 8);
1467 
1468  if (mb_bitmask) {
1469  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1470  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1471  return AVERROR_INVALIDDATA;
1472  }
1473  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1474  }
1475 
1476  s->restart_count = 0;
1477 
1478  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1479  &chroma_v_shift);
1480  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1481  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1482 
1483  for (i = 0; i < nb_components; i++) {
1484  int c = s->comp_index[i];
1485  data[c] = s->picture_ptr->data[c];
1486  reference_data[c] = reference ? reference->data[c] : NULL;
1487  linesize[c] = s->linesize[c];
1488  s->coefs_finished[c] |= 1;
1489  }
1490 
1491  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1492  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1493  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1494 
1495  if (s->restart_interval && !s->restart_count)
1496  s->restart_count = s->restart_interval;
1497 
1498  if (get_bits_left(&s->gb) < 0) {
1499  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1500  -get_bits_left(&s->gb));
1501  return AVERROR_INVALIDDATA;
1502  }
1503  for (i = 0; i < nb_components; i++) {
1504  uint8_t *ptr;
1505  int n, h, v, x, y, c, j;
1506  int block_offset;
1507  n = s->nb_blocks[i];
1508  c = s->comp_index[i];
1509  h = s->h_scount[i];
1510  v = s->v_scount[i];
1511  x = 0;
1512  y = 0;
1513  for (j = 0; j < n; j++) {
1514  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1515  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1516 
1517  if (s->interlaced && s->bottom_field)
1518  block_offset += linesize[c] >> 1;
1519  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1520  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1521  ptr = data[c] + block_offset;
1522  } else
1523  ptr = NULL;
1524  if (!s->progressive) {
1525  if (copy_mb) {
1526  if (ptr)
1527  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1528  linesize[c], s->avctx->lowres);
1529 
1530  } else {
1531  s->bdsp.clear_block(s->block);
1532  if (decode_block(s, s->block, i,
1533  s->dc_index[i], s->ac_index[i],
1534  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1535  av_log(s->avctx, AV_LOG_ERROR,
1536  "error y=%d x=%d\n", mb_y, mb_x);
1537  return AVERROR_INVALIDDATA;
1538  }
1539  if (ptr && linesize[c]) {
1540  s->idsp.idct_put(ptr, linesize[c], s->block);
1541  if (s->bits & 7)
1542  shift_output(s, ptr, linesize[c]);
1543  }
1544  }
1545  } else {
1546  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1547  (h * mb_x + x);
1548  int16_t *block = s->blocks[c][block_idx];
1549  if (Ah)
1550  block[0] += get_bits1(&s->gb) *
1551  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1552  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1553  s->quant_matrixes[s->quant_sindex[i]],
1554  Al) < 0) {
1555  av_log(s->avctx, AV_LOG_ERROR,
1556  "error y=%d x=%d\n", mb_y, mb_x);
1557  return AVERROR_INVALIDDATA;
1558  }
1559  }
1560  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1561  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1562  mb_x, mb_y, x, y, c, s->bottom_field,
1563  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1564  if (++x == h) {
1565  x = 0;
1566  y++;
1567  }
1568  }
1569  }
1570 
1571  handle_rstn(s, nb_components);
1572  }
1573  }
1574  return 0;
1575 }
1576 
1577 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1578  int se, int Ah, int Al)
1579 {
1580  int mb_x, mb_y;
1581  int EOBRUN = 0;
1582  int c = s->comp_index[0];
1583  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1584 
1585  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1586  if (se < ss || se > 63) {
1587  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1588  return AVERROR_INVALIDDATA;
1589  }
1590 
1591  // s->coefs_finished is a bitmask for coefficients coded
1592  // ss and se are parameters telling start and end coefficients
1593  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1594 
1595  s->restart_count = 0;
1596 
1597  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1598  int block_idx = mb_y * s->block_stride[c];
1599  int16_t (*block)[64] = &s->blocks[c][block_idx];
1600  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1601  if (get_bits_left(&s->gb) <= 0) {
1602  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1603  return AVERROR_INVALIDDATA;
1604  }
1605  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1606  int ret;
1607  if (s->restart_interval && !s->restart_count)
1608  s->restart_count = s->restart_interval;
1609 
1610  if (Ah)
1611  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1612  quant_matrix, ss, se, Al, &EOBRUN);
1613  else
1614  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1615  quant_matrix, ss, se, Al, &EOBRUN);
1616 
1617  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1618  ret = AVERROR_INVALIDDATA;
1619  if (ret < 0) {
1620  av_log(s->avctx, AV_LOG_ERROR,
1621  "error y=%d x=%d\n", mb_y, mb_x);
1622  return AVERROR_INVALIDDATA;
1623  }
1624 
1625  if (handle_rstn(s, 0))
1626  EOBRUN = 0;
1627  }
1628  }
1629  return 0;
1630 }
1631 
1632 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1633 {
1634  int mb_x, mb_y;
1635  int c;
1636  const int bytes_per_pixel = 1 + (s->bits > 8);
1637  const int block_size = s->lossless ? 1 : 8;
1638 
1639  for (c = 0; c < s->nb_components; c++) {
1640  uint8_t *data = s->picture_ptr->data[c];
1641  int linesize = s->linesize[c];
1642  int h = s->h_max / s->h_count[c];
1643  int v = s->v_max / s->v_count[c];
1644  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1645  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1646 
1647  if (~s->coefs_finished[c])
1648  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1649 
1650  if (s->interlaced && s->bottom_field)
1651  data += linesize >> 1;
1652 
1653  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1654  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1655  int block_idx = mb_y * s->block_stride[c];
1656  int16_t (*block)[64] = &s->blocks[c][block_idx];
1657  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1658  s->idsp.idct_put(ptr, linesize, *block);
1659  if (s->bits & 7)
1660  shift_output(s, ptr, linesize);
1661  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1662  }
1663  }
1664  }
1665 }
1666 
1667 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1668  int mb_bitmask_size, const AVFrame *reference)
1669 {
1670  int len, nb_components, i, h, v, predictor, point_transform;
1671  int index, id, ret;
1672  const int block_size = s->lossless ? 1 : 8;
1673  int ilv, prev_shift;
1674 
1675  if (!s->got_picture) {
1676  av_log(s->avctx, AV_LOG_WARNING,
1677  "Can not process SOS before SOF, skipping\n");
1678  return -1;
1679  }
1680 
1681  if (reference) {
1682  if (reference->width != s->picture_ptr->width ||
1683  reference->height != s->picture_ptr->height ||
1684  reference->format != s->picture_ptr->format) {
1685  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1686  return AVERROR_INVALIDDATA;
1687  }
1688  }
1689 
1690  /* XXX: verify len field validity */
1691  len = get_bits(&s->gb, 16);
1692  nb_components = get_bits(&s->gb, 8);
1693  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1694  avpriv_report_missing_feature(s->avctx,
1695  "decode_sos: nb_components (%d)",
1696  nb_components);
1697  return AVERROR_PATCHWELCOME;
1698  }
1699  if (len != 6 + 2 * nb_components) {
1700  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1701  return AVERROR_INVALIDDATA;
1702  }
1703  for (i = 0; i < nb_components; i++) {
1704  id = get_bits(&s->gb, 8);
1705  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1706  /* find component index */
1707  for (index = 0; index < s->nb_components; index++)
1708  if (id == s->component_id[index])
1709  break;
1710  if (index == s->nb_components) {
1711  av_log(s->avctx, AV_LOG_ERROR,
1712  "decode_sos: index(%d) out of components\n", index);
1713  return AVERROR_INVALIDDATA;
1714  }
1715  /* Metasoft MJPEG codec has Cb and Cr swapped */
1716  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1717  && nb_components == 3 && s->nb_components == 3 && i)
1718  index = 3 - i;
1719 
1720  s->quant_sindex[i] = s->quant_index[index];
1721  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1722  s->h_scount[i] = s->h_count[index];
1723  s->v_scount[i] = s->v_count[index];
1724 
1725  s->comp_index[i] = index;
1726 
1727  s->dc_index[i] = get_bits(&s->gb, 4);
1728  s->ac_index[i] = get_bits(&s->gb, 4);
1729 
1730  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1731  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1732  goto out_of_range;
1733  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1734  goto out_of_range;
1735  }
1736 
1737  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1738  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1739  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1740  prev_shift = get_bits(&s->gb, 4); /* Ah */
1741  point_transform = get_bits(&s->gb, 4); /* Al */
1742  }else
1743  prev_shift = point_transform = 0;
1744 
1745  if (nb_components > 1) {
1746  /* interleaved stream */
1747  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1748  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1749  } else if (!s->ls) { /* skip this for JPEG-LS */
1750  h = s->h_max / s->h_scount[0];
1751  v = s->v_max / s->v_scount[0];
1752  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1753  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1754  s->nb_blocks[0] = 1;
1755  s->h_scount[0] = 1;
1756  s->v_scount[0] = 1;
1757  }
1758 
1759  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1760  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1761  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1762  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1763  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1764 
1765 
1766  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1767  for (i = s->mjpb_skiptosod; i > 0; i--)
1768  skip_bits(&s->gb, 8);
1769 
1770 next_field:
1771  for (i = 0; i < nb_components; i++)
1772  s->last_dc[i] = (4 << s->bits);
1773 
1774  if (s->avctx->hwaccel) {
1775  int bytes_to_start = get_bits_count(&s->gb) / 8;
1776  av_assert0(bytes_to_start >= 0 &&
1777  s->raw_scan_buffer_size >= bytes_to_start);
1778 
1779  ret = FF_HW_CALL(s->avctx, decode_slice,
1780  s->raw_scan_buffer + bytes_to_start,
1781  s->raw_scan_buffer_size - bytes_to_start);
1782  if (ret < 0)
1783  return ret;
1784 
1785  } else if (s->lossless) {
1786  av_assert0(s->picture_ptr == s->picture);
1787  if (CONFIG_JPEGLS_DECODER && s->ls) {
1788 // for () {
1789 // reset_ls_coding_parameters(s, 0);
1790 
1791  if ((ret = ff_jpegls_decode_picture(s, predictor,
1792  point_transform, ilv)) < 0)
1793  return ret;
1794  } else {
1795  if (s->rgb || s->bayer) {
1796  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1797  return ret;
1798  } else {
1799  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1800  point_transform,
1801  nb_components)) < 0)
1802  return ret;
1803  }
1804  }
1805  } else {
1806  if (s->progressive && predictor) {
1807  av_assert0(s->picture_ptr == s->picture);
1808  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1809  ilv, prev_shift,
1810  point_transform)) < 0)
1811  return ret;
1812  } else {
1813  if ((ret = mjpeg_decode_scan(s, nb_components,
1814  prev_shift, point_transform,
1815  mb_bitmask, mb_bitmask_size, reference)) < 0)
1816  return ret;
1817  }
1818  }
1819 
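 /* AVRn interlaced files append the second field after an RST1 (0xFFD1) marker; if one follows, toggle the field and decode it too */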
1820  if (s->interlaced &&
1821  get_bits_left(&s->gb) > 32 &&
1822  show_bits(&s->gb, 8) == 0xFF) {
1823  GetBitContext bak = s->gb;
1824  align_get_bits(&bak);
1825  if (show_bits(&bak, 16) == 0xFFD1) {
1826  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1827  s->gb = bak;
1828  skip_bits(&s->gb, 16);
1829  s->bottom_field ^= 1;
1830 
1831  goto next_field;
1832  }
1833  }
1834 
1835  emms_c();
1836  return 0;
1837  out_of_range:
1838  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1839  return AVERROR_INVALIDDATA;
1840 }
1841 
1842 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1843 {
1844  if (get_bits(&s->gb, 16) != 4)
1845  return AVERROR_INVALIDDATA;
1846  s->restart_interval = get_bits(&s->gb, 16);
1847  s->restart_count = 0;
1848  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1849  s->restart_interval);
1850 
1851  return 0;
1852 }
1853 
1854 static int mjpeg_decode_app(MJpegDecodeContext *s)
1855 {
1856  int len, id, i;
1857 
1858  len = get_bits(&s->gb, 16);
1859  if (len < 6) {
1860  if (s->bayer) {
1861  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1862  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1863  skip_bits(&s->gb, len);
1864  return 0;
1865  } else
1866  return AVERROR_INVALIDDATA;
1867  }
1868  if (8 * len > get_bits_left(&s->gb))
1869  return AVERROR_INVALIDDATA;
1870 
1871  id = get_bits_long(&s->gb, 32);
1872  len -= 6;
1873 
1874  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1875  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1876  av_fourcc2str(av_bswap32(id)), id, len);
1877 
1878  /* Buggy AVID, it puts EOI only at every 10th frame. */
1879  /* Also, this fourcc is used by non-avid files too, it holds some
1880  information, but it's always present in AVID-created files. */
1881  if (id == AV_RB32("AVI1")) {
1882  /* structure:
1883  4bytes AVI1
1884  1bytes polarity
1885  1bytes always zero
1886  4bytes field_size
1887  4bytes field_size_less_padding
1888  */
1889  s->buggy_avid = 1;
1890  i = get_bits(&s->gb, 8); len--;
1891  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1892  goto out;
1893  }
1894 
1895  if (id == AV_RB32("JFIF")) {
1896  int t_w, t_h, v1, v2;
1897  if (len < 8)
1898  goto out;
1899  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1900  v1 = get_bits(&s->gb, 8);
1901  v2 = get_bits(&s->gb, 8);
1902  skip_bits(&s->gb, 8);
1903 
1904  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1905  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1906  if ( s->avctx->sample_aspect_ratio.num <= 0
1907  || s->avctx->sample_aspect_ratio.den <= 0) {
1908  s->avctx->sample_aspect_ratio.num = 0;
1909  s->avctx->sample_aspect_ratio.den = 1;
1910  }
1911 
1912  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1913  av_log(s->avctx, AV_LOG_INFO,
1914  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1915  v1, v2,
1916  s->avctx->sample_aspect_ratio.num,
1917  s->avctx->sample_aspect_ratio.den);
1918 
1919  len -= 8;
1920  if (len >= 2) {
1921  t_w = get_bits(&s->gb, 8);
1922  t_h = get_bits(&s->gb, 8);
1923  if (t_w && t_h) {
1924  /* skip thumbnail */
1925  if (len -10 - (t_w * t_h * 3) > 0)
1926  len -= t_w * t_h * 3;
1927  }
1928  len -= 2;
1929  }
1930  goto out;
1931  }
1932 
1933  if ( id == AV_RB32("Adob")
1934  && len >= 7
1935  && show_bits(&s->gb, 8) == 'e'
1936  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1937  skip_bits(&s->gb, 8); /* 'e' */
1938  skip_bits(&s->gb, 16); /* version */
1939  skip_bits(&s->gb, 16); /* flags0 */
1940  skip_bits(&s->gb, 16); /* flags1 */
1941  s->adobe_transform = get_bits(&s->gb, 8);
1942  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1943  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1944  len -= 7;
1945  goto out;
1946  }
1947 
1948  if (id == AV_RB32("LJIF")) {
1949  int rgb = s->rgb;
1950  int pegasus_rct = s->pegasus_rct;
1951  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1952  av_log(s->avctx, AV_LOG_INFO,
1953  "Pegasus lossless jpeg header found\n");
1954  skip_bits(&s->gb, 16); /* version ? */
1955  skip_bits(&s->gb, 16); /* unknown always 0? */
1956  skip_bits(&s->gb, 16); /* unknown always 0? */
1957  skip_bits(&s->gb, 16); /* unknown always 0? */
1958  switch (i=get_bits(&s->gb, 8)) {
1959  case 1:
1960  rgb = 1;
1961  pegasus_rct = 0;
1962  break;
1963  case 2:
1964  rgb = 1;
1965  pegasus_rct = 1;
1966  break;
1967  default:
1968  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1969  }
1970 
1971  len -= 9;
1972  if (s->bayer)
1973  goto out;
1974  if (s->got_picture)
1975  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1976  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1977  goto out;
1978  }
1979 
1980  s->rgb = rgb;
1981  s->pegasus_rct = pegasus_rct;
1982 
1983  goto out;
1984  }
1985  if (id == AV_RL32("colr") && len > 0) {
1986  s->colr = get_bits(&s->gb, 8);
1987  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1988  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1989  len --;
1990  goto out;
1991  }
1992  if (id == AV_RL32("xfrm") && len > 0) {
1993  s->xfrm = get_bits(&s->gb, 8);
1994  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1995  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1996  len --;
1997  goto out;
1998  }
1999 
2000  /* JPS extension by VRex */
2001  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
2002  int flags, layout, type;
2003  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2004  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2005 
2006  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
2007  skip_bits(&s->gb, 16); len -= 2; /* block length */
2008  skip_bits(&s->gb, 8); /* reserved */
2009  flags = get_bits(&s->gb, 8);
2010  layout = get_bits(&s->gb, 8);
2011  type = get_bits(&s->gb, 8);
2012  len -= 4;
2013 
2014  av_freep(&s->stereo3d);
2015  s->stereo3d = av_stereo3d_alloc();
2016  if (!s->stereo3d) {
2017  goto out;
2018  }
2019  if (type == 0) {
2020  s->stereo3d->type = AV_STEREO3D_2D;
2021  } else if (type == 1) {
2022  switch (layout) {
2023  case 0x01:
2024  s->stereo3d->type = AV_STEREO3D_LINES;
2025  break;
2026  case 0x02:
2027  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2028  break;
2029  case 0x03:
2030  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2031  break;
2032  }
2033  if (!(flags & 0x04)) {
2034  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2035  }
2036  }
2037  goto out;
2038  }
2039 
2040  /* EXIF metadata */
2041  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2042  GetByteContext gbytes;
2043  int ret, le, ifd_offset, bytes_read;
2044  const uint8_t *aligned;
2045 
2046  skip_bits(&s->gb, 16); // skip padding
2047  len -= 2;
2048 
2049  // init byte wise reading
2050  aligned = align_get_bits(&s->gb);
2051  bytestream2_init(&gbytes, aligned, len);
2052 
2053  // read TIFF header
2054  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2055  if (ret) {
2056  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2057  } else {
2058  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2059 
2060  // read 0th IFD and store the metadata
2061  // (return values > 0 indicate the presence of subimage metadata)
2062  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2063  if (ret < 0) {
2064  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2065  }
2066  }
2067 
2068  bytes_read = bytestream2_tell(&gbytes);
2069  skip_bits(&s->gb, bytes_read << 3);
2070  len -= bytes_read;
2071 
2072  goto out;
2073  }
2074 
2075  /* Apple MJPEG-A */
2076  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2077  id = get_bits_long(&s->gb, 32);
2078  len -= 4;
2079  /* Apple MJPEG-A */
2080  if (id == AV_RB32("mjpg")) {
2081  /* structure:
2082  4bytes field size
2083  4bytes pad field size
2084  4bytes next off
2085  4bytes quant off
2086  4bytes huff off
2087  4bytes image off
2088  4bytes scan off
2089  4bytes data off
2090  */
2091  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2092  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2093  }
2094  }
2095 
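 /* ICC profiles can be split across several APP2 markers; collect each chunk here and reassemble them as frame side data once all markers have been read */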
2096  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2097  int id2;
2098  unsigned seqno;
2099  unsigned nummarkers;
2100 
2101  id = get_bits_long(&s->gb, 32);
2102  id2 = get_bits(&s->gb, 24);
2103  len -= 7;
2104  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2105  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2106  goto out;
2107  }
2108 
2109  skip_bits(&s->gb, 8);
2110  seqno = get_bits(&s->gb, 8);
2111  len -= 2;
2112  if (seqno == 0) {
2113  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2114  goto out;
2115  }
2116 
2117  nummarkers = get_bits(&s->gb, 8);
2118  len -= 1;
2119  if (nummarkers == 0) {
2120  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2121  goto out;
2122  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2123  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2124  goto out;
2125  } else if (seqno > nummarkers) {
2126  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2127  goto out;
2128  }
2129 
2130  /* Allocate if this is the first APP2 we've seen. */
2131  if (s->iccnum == 0) {
2132  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2133  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2134  return AVERROR(ENOMEM);
2135  }
2136  s->iccnum = nummarkers;
2137  }
2138 
2139  if (s->iccentries[seqno - 1].data) {
2140  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2141  goto out;
2142  }
2143 
2144  s->iccentries[seqno - 1].length = len;
2145  s->iccentries[seqno - 1].data = av_malloc(len);
2146  if (!s->iccentries[seqno - 1].data) {
2147  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2148  return AVERROR(ENOMEM);
2149  }
2150 
2151  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2152  skip_bits(&s->gb, len << 3);
2153  len = 0;
2154  s->iccread++;
2155 
2156  if (s->iccread > s->iccnum)
2157  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2158  }
2159 
2160 out:
2161  /* slow but needed for extreme adobe jpegs */
2162  if (len < 0)
2163  av_log(s->avctx, AV_LOG_ERROR,
2164  "mjpeg: error, decode_app parser read over the end\n");
2165  while (--len > 0)
2166  skip_bits(&s->gb, 8);
2167 
2168  return 0;
2169 }
2170 
2171 static int mjpeg_decode_com(MJpegDecodeContext *s)
2172 {
2173  int len = get_bits(&s->gb, 16);
2174  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2175  int i;
2176  char *cbuf = av_malloc(len - 1);
2177  if (!cbuf)
2178  return AVERROR(ENOMEM);
2179 
2180  for (i = 0; i < len - 2; i++)
2181  cbuf[i] = get_bits(&s->gb, 8);
2182  if (i > 0 && cbuf[i - 1] == '\n')
2183  cbuf[i - 1] = 0;
2184  else
2185  cbuf[i] = 0;
2186 
2187  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2188  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2189 
2190  /* buggy avid, it puts EOI only at every 10th frame */
2191  if (!strncmp(cbuf, "AVID", 4)) {
2192  parse_avid(s, cbuf, len);
2193  } else if (!strcmp(cbuf, "CS=ITU601"))
2194  s->cs_itu601 = 1;
2195  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2196  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2197  s->flipped = 1;
2198  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2199  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2200  s->multiscope = 2;
2201  }
2202 
2203  av_free(cbuf);
2204  }
2205 
2206  return 0;
2207 }
2208 
2209 /* return the 8 bit start code value and update the search
2210  state. Return -1 if no start code found */
2211 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2212 {
2213  const uint8_t *buf_ptr;
2214  unsigned int v, v2;
2215  int val;
2216  int skipped = 0;
2217 
2218  buf_ptr = *pbuf_ptr;
2219  while (buf_end - buf_ptr > 1) {
2220  v = *buf_ptr++;
2221  v2 = *buf_ptr;
2222  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2223  val = *buf_ptr++;
2224  goto found;
2225  }
2226  skipped++;
2227  }
2228  buf_ptr = buf_end;
2229  val = -1;
2230 found:
2231  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2232  *pbuf_ptr = buf_ptr;
2233  return val;
2234 }
2235 
2236 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2237  const uint8_t **buf_ptr, const uint8_t *buf_end,
2238  const uint8_t **unescaped_buf_ptr,
2239  int *unescaped_buf_size)
2240 {
2241  int start_code;
2242  start_code = find_marker(buf_ptr, buf_end);
2243 
2244  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2245  if (!s->buffer)
2246  return AVERROR(ENOMEM);
2247 
2248  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2249  if (start_code == SOS && !s->ls) {
2250  const uint8_t *src = *buf_ptr;
2251  const uint8_t *ptr = src;
2252  uint8_t *dst = s->buffer;
2253 
2254  #define copy_data_segment(skip) do { \
2255  ptrdiff_t length = (ptr - src) - (skip); \
2256  if (length > 0) { \
2257  memcpy(dst, src, length); \
2258  dst += length; \
2259  src = ptr; \
2260  } \
2261  } while (0)
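 /* entropy-coded data escapes every 0xFF with a stuffed 0x00 (or an RSTn marker); the loop below copies payload bytes, drops the stuffing and stops at the next real marker */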
2262 
2263  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2264  ptr = buf_end;
2265  copy_data_segment(0);
2266  } else {
2267  while (ptr < buf_end) {
2268  uint8_t x = *(ptr++);
2269 
2270  if (x == 0xff) {
2271  ptrdiff_t skip = 0;
2272  while (ptr < buf_end && x == 0xff) {
2273  x = *(ptr++);
2274  skip++;
2275  }
2276 
2277  /* 0xFF, 0xFF, ... */
2278  if (skip > 1) {
2279  copy_data_segment(skip);
2280 
2281  /* decrement src as it is equal to ptr after the
2282  * copy_data_segment macro and we might want to
2283  * copy the current value of x later on */
2284  src--;
2285  }
2286 
2287  if (x < RST0 || x > RST7) {
2288  copy_data_segment(1);
2289  if (x)
2290  break;
2291  }
2292  }
2293  }
2294  if (src < ptr)
2295  copy_data_segment(0);
2296  }
2297  #undef copy_data_segment
2298 
2299  *unescaped_buf_ptr = s->buffer;
2300  *unescaped_buf_size = dst - s->buffer;
2301  memset(s->buffer + *unescaped_buf_size, 0,
2302  s->buffer_size - *unescaped_buf_size);
2303 
2304  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2305  (buf_end - *buf_ptr) - (dst - s->buffer));
2306  } else if (start_code == SOS && s->ls) {
2307  const uint8_t *src = *buf_ptr;
2308  uint8_t *dst = s->buffer;
2309  int bit_count = 0;
2310  int t = 0, b = 0;
2311  PutBitContext pb;
2312 
2313  /* find marker */
2314  while (src + t < buf_end) {
2315  uint8_t x = src[t++];
2316  if (x == 0xff) {
2317  while ((src + t < buf_end) && x == 0xff)
2318  x = src[t++];
2319  if (x & 0x80) {
2320  t -= FFMIN(2, t);
2321  break;
2322  }
2323  }
2324  }
2325  bit_count = t * 8;
2326  init_put_bits(&pb, dst, t);
2327 
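 /* JPEG-LS stuffs a single 0 bit (not a whole byte) after each 0xFF, so only 7 payload bits follow it; repack the bitstream and track the true bit count */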
2328  /* unescape bitstream */
2329  while (b < t) {
2330  uint8_t x = src[b++];
2331  put_bits(&pb, 8, x);
2332  if (x == 0xFF && b < t) {
2333  x = src[b++];
2334  if (x & 0x80) {
2335  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2336  x &= 0x7f;
2337  }
2338  put_bits(&pb, 7, x);
2339  bit_count--;
2340  }
2341  }
2342  flush_put_bits(&pb);
2343 
2344  *unescaped_buf_ptr = dst;
2345  *unescaped_buf_size = (bit_count + 7) >> 3;
2346  memset(s->buffer + *unescaped_buf_size, 0,
2347  s->buffer_size - *unescaped_buf_size);
2348  } else {
2349  *unescaped_buf_ptr = *buf_ptr;
2350  *unescaped_buf_size = buf_end - *buf_ptr;
2351  }
2352 
2353  return start_code;
2354 }
2355 
2356 static void reset_icc_profile(MJpegDecodeContext *s)
2357 {
2358  int i;
2359 
2360  if (s->iccentries) {
2361  for (i = 0; i < s->iccnum; i++)
2362  av_freep(&s->iccentries[i].data);
2363  av_freep(&s->iccentries);
2364  }
2365 
2366  s->iccread = 0;
2367  s->iccnum = 0;
2368 }
2369 
2370 int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2371  int *got_frame, const AVPacket *avpkt,
2372  const uint8_t *buf, const int buf_size)
2373 {
2374  MJpegDecodeContext *s = avctx->priv_data;
2375  const uint8_t *buf_end, *buf_ptr;
2376  const uint8_t *unescaped_buf_ptr;
2377  int hshift, vshift;
2378  int unescaped_buf_size;
2379  int start_code;
2380  int i, index;
2381  int ret = 0;
2382  int is16bit;
2383  AVDictionaryEntry *e = NULL;
2384 
2385  s->force_pal8 = 0;
2386 
2387  s->buf_size = buf_size;
2388 
2389  av_dict_free(&s->exif_metadata);
2390  av_freep(&s->stereo3d);
2391  s->adobe_transform = -1;
2392 
2393  if (s->iccnum != 0)
2394  reset_icc_profile(s);
2395 
2396 redo_for_pal8:
2397  buf_ptr = buf;
2398  buf_end = buf + buf_size;
2399  while (buf_ptr < buf_end) {
2400  /* find start next marker */
2401  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2402  &unescaped_buf_ptr,
2403  &unescaped_buf_size);
2404  /* EOF */
2405  if (start_code < 0) {
2406  break;
2407  } else if (unescaped_buf_size > INT_MAX / 8) {
2408  av_log(avctx, AV_LOG_ERROR,
2409  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2410  start_code, unescaped_buf_size, buf_size);
2411  return AVERROR_INVALIDDATA;
2412  }
2413  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2414  start_code, buf_end - buf_ptr);
2415 
2416  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2417 
2418  if (ret < 0) {
2419  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2420  goto fail;
2421  }
2422 
2423  s->start_code = start_code;
2424  if (avctx->debug & FF_DEBUG_STARTCODE)
2425  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2426 
2427  /* process markers */
2428  if (start_code >= RST0 && start_code <= RST7) {
2429  av_log(avctx, AV_LOG_DEBUG,
2430  "restart marker: %d\n", start_code & 0x0f);
2431  /* APP fields */
2432  } else if (start_code >= APP0 && start_code <= APP15) {
2433  if ((ret = mjpeg_decode_app(s)) < 0)
2434  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2435  av_err2str(ret));
2436  /* Comment */
2437  } else if (start_code == COM) {
2438  ret = mjpeg_decode_com(s);
2439  if (ret < 0)
2440  return ret;
2441  } else if (start_code == DQT) {
2442  ret = ff_mjpeg_decode_dqt(s);
2443  if (ret < 0)
2444  return ret;
2445  }
2446 
2447  ret = -1;
2448 
2449  if (!CONFIG_JPEGLS_DECODER &&
2450  (start_code == SOF48 || start_code == LSE)) {
2451  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2452  return AVERROR(ENOSYS);
2453  }
2454 
2455  if (avctx->skip_frame == AVDISCARD_ALL) {
2456  switch(start_code) {
2457  case SOF0:
2458  case SOF1:
2459  case SOF2:
2460  case SOF3:
2461  case SOF48:
2462  case SOI:
2463  case SOS:
2464  case EOI:
2465  break;
2466  default:
2467  goto skip;
2468  }
2469  }
2470 
2471  switch (start_code) {
2472  case SOI:
2473  s->restart_interval = 0;
2474  s->restart_count = 0;
2475  s->raw_image_buffer = buf_ptr;
2476  s->raw_image_buffer_size = buf_end - buf_ptr;
2477  /* nothing to do on SOI */
2478  break;
2479  case DHT:
2480  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2481  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2482  goto fail;
2483  }
2484  break;
2485  case SOF0:
2486  case SOF1:
2487  if (start_code == SOF0)
2488  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2489  else
2490  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2491  s->lossless = 0;
2492  s->ls = 0;
2493  s->progressive = 0;
2494  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2495  goto fail;
2496  break;
2497  case SOF2:
2498  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2499  s->lossless = 0;
2500  s->ls = 0;
2501  s->progressive = 1;
2502  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2503  goto fail;
2504  break;
2505  case SOF3:
2506  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2507  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2508  s->lossless = 1;
2509  s->ls = 0;
2510  s->progressive = 0;
2511  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2512  goto fail;
2513  break;
2514  case SOF48:
2515  avctx->profile = AV_PROFILE_MJPEG_JPEG_LS;
2516  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2517  s->lossless = 1;
2518  s->ls = 1;
2519  s->progressive = 0;
2520  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2521  goto fail;
2522  break;
2523  case LSE:
2524  if (!CONFIG_JPEGLS_DECODER ||
2525  (ret = ff_jpegls_decode_lse(s)) < 0)
2526  goto fail;
2527  if (ret == 1)
2528  goto redo_for_pal8;
2529  break;
2530  case EOI:
2531 eoi_parser:
2532  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2533  s->progressive && s->cur_scan && s->got_picture)
2534  mjpeg_idct_scan_progressive_ac(s);
2535  s->cur_scan = 0;
2536  if (!s->got_picture) {
2537  av_log(avctx, AV_LOG_WARNING,
2538  "Found EOI before any SOF, ignoring\n");
2539  break;
2540  }
2541  if (s->interlaced) {
2542  s->bottom_field ^= 1;
2543  /* if not bottom field, do not output image yet */
2544  if (s->bottom_field == !s->interlace_polarity)
2545  break;
2546  }
2547  if (avctx->skip_frame == AVDISCARD_ALL) {
2548  s->got_picture = 0;
2549  goto the_end_no_picture;
2550  }
2551  if (avctx->hwaccel) {
2552  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2553  if (ret < 0)
2554  return ret;
2555 
2556  av_freep(&s->hwaccel_picture_private);
2557  }
2558  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2559  return ret;
2560  *got_frame = 1;
2561  s->got_picture = 0;
2562 
2563  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2564  int qp = FFMAX3(s->qscale[0],
2565  s->qscale[1],
2566  s->qscale[2]);
2567 
2568  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2569  }
2570 
2571  goto the_end;
2572  case SOS:
2573  s->raw_scan_buffer = buf_ptr;
2574  s->raw_scan_buffer_size = buf_end - buf_ptr;
2575 
2576  s->cur_scan++;
2577  if (avctx->skip_frame == AVDISCARD_ALL) {
2578  skip_bits(&s->gb, get_bits_left(&s->gb));
2579  break;
2580  }
2581 
2582  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2583  (avctx->err_recognition & AV_EF_EXPLODE))
2584  goto fail;
2585  break;
2586  case DRI:
2587  if ((ret = mjpeg_decode_dri(s)) < 0)
2588  return ret;
2589  break;
2590  case SOF5:
2591  case SOF6:
2592  case SOF7:
2593  case SOF9:
2594  case SOF10:
2595  case SOF11:
2596  case SOF13:
2597  case SOF14:
2598  case SOF15:
2599  case JPG:
2600  av_log(avctx, AV_LOG_ERROR,
2601  "mjpeg: unsupported coding type (%x)\n", start_code);
2602  break;
2603  }
2604 
2605 skip:
2606  /* eof process start code */
2607  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2608  av_log(avctx, AV_LOG_DEBUG,
2609  "marker parser used %d bytes (%d bits)\n",
2610  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2611  }
2612  if (s->got_picture && s->cur_scan) {
2613  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2614  goto eoi_parser;
2615  }
2616  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2617  return AVERROR_INVALIDDATA;
2618 fail:
2619  s->got_picture = 0;
2620  return ret;
2621 the_end:
2622 
2623  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2624 
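 /* some chroma layouts are decoded at reduced resolution; the next two blocks upsample the marked planes in place, horizontally then vertically, walking backwards so source samples are read before being overwritten */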
2625  if (AV_RB32(s->upscale_h)) {
2626  int p;
2627  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2628  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2629  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2630  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2631  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2632  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2633  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2634  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2635  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2636  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2637  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2638  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2639  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2640  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2641  );
2642  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2643  if (ret)
2644  return ret;
2645 
2646  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2647  for (p = 0; p<s->nb_components; p++) {
2648  uint8_t *line = s->picture_ptr->data[p];
2649  int w = s->width;
2650  int h = s->height;
2651  if (!s->upscale_h[p])
2652  continue;
2653  if (p==1 || p==2) {
2654  w = AV_CEIL_RSHIFT(w, hshift);
2655  h = AV_CEIL_RSHIFT(h, vshift);
2656  }
2657  if (s->upscale_v[p] == 1)
2658  h = (h+1)>>1;
2659  av_assert0(w > 0);
2660  for (i = 0; i < h; i++) {
2661  if (s->upscale_h[p] == 1) {
2662  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2663  else line[w - 1] = line[(w - 1) / 2];
2664  for (index = w - 2; index > 0; index--) {
2665  if (is16bit)
2666  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2667  else
2668  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2669  }
2670  } else if (s->upscale_h[p] == 2) {
2671  if (is16bit) {
2672  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2673  if (w > 1)
2674  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2675  } else {
2676  line[w - 1] = line[(w - 1) / 3];
2677  if (w > 1)
2678  line[w - 2] = line[w - 1];
2679  }
2680  for (index = w - 3; index > 0; index--) {
2681  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2682  }
2683  } else if (s->upscale_h[p] == 4){
2684  if (is16bit) {
2685  uint16_t *line16 = (uint16_t *) line;
2686  line16[w - 1] = line16[(w - 1) >> 2];
2687  if (w > 1)
2688  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2689  if (w > 2)
2690  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2691  } else {
2692  line[w - 1] = line[(w - 1) >> 2];
2693  if (w > 1)
2694  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2695  if (w > 2)
2696  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2697  }
2698  for (index = w - 4; index > 0; index--)
2699  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2700  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2701  }
2702  line += s->linesize[p];
2703  }
2704  }
2705  }
2706  if (AV_RB32(s->upscale_v)) {
2707  int p;
2708  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2709  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2710  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2711  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2712  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2713  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2714  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2715  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2716  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2717  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2718  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2719  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2720  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2721  );
2722  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2723  if (ret)
2724  return ret;
2725 
2726  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2727  for (p = 0; p < s->nb_components; p++) {
2728  uint8_t *dst;
2729  int w = s->width;
2730  int h = s->height;
2731  if (!s->upscale_v[p])
2732  continue;
2733  if (p==1 || p==2) {
2734  w = AV_CEIL_RSHIFT(w, hshift);
2735  h = AV_CEIL_RSHIFT(h, vshift);
2736  }
2737  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2738  for (i = h - 1; i; i--) {
2739  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2740  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2741  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2742  memcpy(dst, src1, w);
2743  } else {
2744  for (index = 0; index < w; index++)
2745  dst[index] = (src1[index] + src2[index]) >> 1;
2746  }
2747  dst -= s->linesize[p];
2748  }
2749  }
2750  }
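 /* streams from encoders that store the image bottom-up (see mjpeg_decode_com) are flipped by pointing each plane at its last row and negating the stride */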
2751  if (s->flipped && !s->rgb) {
2752  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2753  if (ret)
2754  return ret;
2755 
2756  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2757  for (index=0; index<s->nb_components; index++) {
2758  int h = frame->height;
2759  if (index && index < 3)
2760  h = AV_CEIL_RSHIFT(h, vshift);
2761  if (frame->data[index]) {
2762  frame->data[index] += (h - 1) * frame->linesize[index];
2763  frame->linesize[index] *= -1;
2764  }
2765  }
2766  }
2767 
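 /* the planes were decoded in JPEG R,G,B order; rotate the pointers into the G,B,R order expected by AV_PIX_FMT_GBRP */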
2768  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2769  av_assert0(s->nb_components == 3);
2770  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2771  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2772  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2773  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2774  }
2775 
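 /* Adobe transform 0 with four components: fold the K (4th) plane into each colour plane (x * k * 257 >> 16 approximates x * k / 255) and reset alpha to opaque */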
2776  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2777  int w = s->picture_ptr->width;
2778  int h = s->picture_ptr->height;
2779  av_assert0(s->nb_components == 4);
2780  for (i=0; i<h; i++) {
2781  int j;
2782  uint8_t *dst[4];
2783  for (index=0; index<4; index++) {
2784  dst[index] = s->picture_ptr->data[index]
2785  + s->picture_ptr->linesize[index]*i;
2786  }
2787  for (j=0; j<w; j++) {
2788  int k = dst[3][j];
2789  int r = dst[0][j] * k;
2790  int g = dst[1][j] * k;
2791  int b = dst[2][j] * k;
2792  dst[0][j] = g*257 >> 16;
2793  dst[1][j] = b*257 >> 16;
2794  dst[2][j] = r*257 >> 16;
2795  }
2796  memset(dst[3], 255, w);
2797  }
2798  }
2799  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2800  int w = s->picture_ptr->width;
2801  int h = s->picture_ptr->height;
2802  av_assert0(s->nb_components == 4);
2803  for (i=0; i<h; i++) {
2804  int j;
2805  uint8_t *dst[4];
2806  for (index=0; index<4; index++) {
2807  dst[index] = s->picture_ptr->data[index]
2808  + s->picture_ptr->linesize[index]*i;
2809  }
2810  for (j=0; j<w; j++) {
2811  int k = dst[3][j];
2812  int r = (255 - dst[0][j]) * k;
2813  int g = (128 - dst[1][j]) * k;
2814  int b = (128 - dst[2][j]) * k;
2815  dst[0][j] = r*257 >> 16;
2816  dst[1][j] = (g*257 >> 16) + 128;
2817  dst[2][j] = (b*257 >> 16) + 128;
2818  }
2819  memset(dst[3], 255, w);
2820  }
2821  }
2822 
2823  if (s->stereo3d) {
2824  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2825  if (stereo) {
2826  stereo->type = s->stereo3d->type;
2827  stereo->flags = s->stereo3d->flags;
2828  }
2829  av_freep(&s->stereo3d);
2830  }
2831 
2832  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2833  AVFrameSideData *sd;
2834  size_t offset = 0;
2835  int total_size = 0;
2836  int i;
2837 
2838  /* Sum size of all parts. */
2839  for (i = 0; i < s->iccnum; i++)
2840  total_size += s->iccentries[i].length;
2841 
2842  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2843  if (ret < 0) {
2844  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2845  return ret;
2846  }
2847 
2848  if (sd) {
2849  /* Reassemble the parts, which are now in-order. */
2850  for (i = 0; i < s->iccnum; i++) {
2851  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2852  offset += s->iccentries[i].length;
2853  }
2854  }
2855  }
2856 
2857  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2858  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2859  int orientation = strtol(value, &endptr, 0);
2860 
2861  if (!*endptr) {
2862  AVFrameSideData *sd = NULL;
2863 
2864  if (orientation >= 2 && orientation <= 8) {
2865  int32_t *matrix;
2866 
2867  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2868  if (!sd) {
2869  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2870  return AVERROR(ENOMEM);
2871  }
2872 
2873  matrix = (int32_t *)sd->data;
2874 
2875  switch (orientation) {
2876  case 2:
2877  av_display_rotation_set(matrix, 0.0);
2878  av_display_matrix_flip(matrix, 1, 0);
2879  break;
2880  case 3:
2881  av_display_rotation_set(matrix, 180.0);
2882  break;
2883  case 4:
2884  av_display_rotation_set(matrix, 180.0);
2885  av_display_matrix_flip(matrix, 1, 0);
2886  break;
2887  case 5:
2888  av_display_rotation_set(matrix, 90.0);
2889  av_display_matrix_flip(matrix, 1, 0);
2890  break;
2891  case 6:
2892  av_display_rotation_set(matrix, 90.0);
2893  break;
2894  case 7:
2895  av_display_rotation_set(matrix, -90.0);
2896  av_display_matrix_flip(matrix, 1, 0);
2897  break;
2898  case 8:
2899  av_display_rotation_set(matrix, -90.0);
2900  break;
2901  default:
2902  av_assert0(0);
2903  }
2904  }
2905  }
2906  }
2907 
2908  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2909  av_dict_free(&s->exif_metadata);
2910 
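 /* AVID (AVRn/AVDJ) streams may code more rows than are meant to be shown; expose the full coded height and crop the excess rows at the top */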
2911  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2912  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2913  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2914  avctx->coded_height > s->orig_height) {
2915  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2916  frame->crop_top = frame->height - avctx->height;
2917  }
2918 
2919 the_end_no_picture:
2920  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2921  buf_end - buf_ptr);
2922  return buf_ptr - buf;
2923 }
2924 
2925 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2926  AVPacket *avpkt)
2927 {
2928  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2929  avpkt, avpkt->data, avpkt->size);
2930 }
2931 
2932 
2933 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2934  * even without having called ff_mjpeg_decode_init(). */
2935 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2936 {
2937  MJpegDecodeContext *s = avctx->priv_data;
2938  int i, j;
2939 
2940  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2941  av_log(avctx, AV_LOG_INFO, "Single field\n");
2942  }
2943 
2944  if (s->picture) {
2945  av_frame_free(&s->picture);
2946  s->picture_ptr = NULL;
2947  } else if (s->picture_ptr)
2948  av_frame_unref(s->picture_ptr);
2949 
2950  av_frame_free(&s->smv_frame);
2951 
2952  av_freep(&s->buffer);
2953  av_freep(&s->stereo3d);
2954  av_freep(&s->ljpeg_buffer);
2955  s->ljpeg_buffer_size = 0;
2956 
2957  for (i = 0; i < 3; i++) {
2958  for (j = 0; j < 4; j++)
2959  ff_vlc_free(&s->vlcs[i][j]);
2960  }
2961  for (i = 0; i < MAX_COMPONENTS; i++) {
2962  av_freep(&s->blocks[i]);
2963  av_freep(&s->last_nnz[i]);
2964  }
2965  av_dict_free(&s->exif_metadata);
2966 
2967  reset_icc_profile(s);
2968 
2969  av_freep(&s->hwaccel_picture_private);
2970  av_freep(&s->jls_state);
2971 
2972  return 0;
2973 }
2974 
2975 static void decode_flush(AVCodecContext *avctx)
2976 {
2977  MJpegDecodeContext *s = avctx->priv_data;
2978  s->got_picture = 0;
2979 
2980  s->smv_next_frame = 0;
2981  av_frame_unref(s->smv_frame);
2982 }
2983 
2984 #if CONFIG_MJPEG_DECODER
2985 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2986 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2987 static const AVOption options[] = {
2988  { "extern_huff", "Use external huffman table.",
2989  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2990  { NULL },
2991 };
2992 
2993 static const AVClass mjpegdec_class = {
2994  .class_name = "MJPEG decoder",
2995  .item_name = av_default_item_name,
2996  .option = options,
2997  .version = LIBAVUTIL_VERSION_INT,
2998 };
2999 
3000 const FFCodec ff_mjpeg_decoder = {
3001  .p.name = "mjpeg",
3002  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
3003  .p.type = AVMEDIA_TYPE_VIDEO,
3004  .p.id = AV_CODEC_ID_MJPEG,
3005  .priv_data_size = sizeof(MJpegDecodeContext),
3006  .init = ff_mjpeg_decode_init,
3007  .close = ff_mjpeg_decode_end,
3008  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
3009  .flush = decode_flush,
3010  .p.capabilities = AV_CODEC_CAP_DR1,
3011  .p.max_lowres = 3,
3012  .p.priv_class = &mjpegdec_class,
3013  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
3014  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3017  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3018 #if CONFIG_MJPEG_NVDEC_HWACCEL
3019  HWACCEL_NVDEC(mjpeg),
3020 #endif
3021 #if CONFIG_MJPEG_VAAPI_HWACCEL
3022  HWACCEL_VAAPI(mjpeg),
3023 #endif
3024  NULL
3025  },
3026 };
3027 #endif
3028 #if CONFIG_THP_DECODER
3029 const FFCodec ff_thp_decoder = {
3030  .p.name = "thp",
3031  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
3032  .p.type = AVMEDIA_TYPE_VIDEO,
3033  .p.id = AV_CODEC_ID_THP,
3034  .priv_data_size = sizeof(MJpegDecodeContext),
3035  .init = ff_mjpeg_decode_init,
3036  .close = ff_mjpeg_decode_end,
3037  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
3038  .flush = decode_flush,
3039  .p.capabilities = AV_CODEC_CAP_DR1,
3040  .p.max_lowres = 3,
3041  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3042 };
3043 #endif
3044 
3045 #if CONFIG_SMVJPEG_DECODER
3046 // SMV JPEG just stacks several output frames into one JPEG picture
3047 // we handle that by setting up the cropping parameters appropriately
3048 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3049 {
3050  MJpegDecodeContext *s = avctx->priv_data;
3051 
3052  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3053 
3054  frame->width = avctx->coded_width;
3055  frame->height = avctx->coded_height;
3056  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3057  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3058 
3059  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3060  s->smv_frame->pts += s->smv_frame->duration;
3061  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3062 
3063  if (s->smv_next_frame == 0)
3064  av_frame_unref(s->smv_frame);
3065 }
3066 
3067 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3068 {
3069  MJpegDecodeContext *s = avctx->priv_data;
3070  AVPacket *const pkt = avctx->internal->in_pkt;
3071  int got_frame = 0;
3072  int ret;
3073 
3074  if (s->smv_next_frame > 0)
3075  goto return_frame;
3076 
3077  ret = ff_decode_get_packet(avctx, pkt);
3078  if (ret < 0)
3079  return ret;
3080 
3081  av_frame_unref(s->smv_frame);
3082 
3083  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3084  s->smv_frame->pkt_dts = pkt->dts;
3085  av_packet_unref(pkt);
3086  if (ret < 0)
3087  return ret;
3088 
3089  if (!got_frame)
3090  return AVERROR(EAGAIN);
3091 
3092  // packet duration covers all the frames in the packet
3093  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3094 
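 /* the same decoded JPEG is returned smv_frames_per_jpeg times; each call re-references it and smv_process_frame() selects the next slice via cropping */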
3095 return_frame:
3096  av_assert0(s->smv_frame->buf[0]);
3097  ret = av_frame_ref(frame, s->smv_frame);
3098  if (ret < 0)
3099  return ret;
3100 
3101  smv_process_frame(avctx, frame);
3102  return 0;
3103 }
3104 
3105 const FFCodec ff_smvjpeg_decoder = {
3106  .p.name = "smvjpeg",
3107  CODEC_LONG_NAME("SMV JPEG"),
3108  .p.type = AVMEDIA_TYPE_VIDEO,
3109  .p.id = AV_CODEC_ID_SMVJPEG,
3110  .priv_data_size = sizeof(MJpegDecodeContext),
3111  .init = ff_mjpeg_decode_init,
3112  .close = ff_mjpeg_decode_end,
3113  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3114  .flush = decode_flush,
3115  .p.capabilities = AV_CODEC_CAP_DR1,
3116  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3118 };
3119 #endif
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
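For readers unfamiliar with the bitstream reader used throughout this decoder, here is a minimal, hypothetical sketch of reading one Huffman-coded symbol with get_vlc2(); it assumes an already-initialized GetBitContext gb and a VLC vlc built elsewhere (for MJPEG, via ff_mjpeg_build_vlc()), and the 9-bit/2-lookup parameters simply mirror the values this file passes.
```c
/* Sketch only (FFmpeg-internal API): gb and vlc are assumed to be initialized by the caller. */
int code = get_vlc2(&gb, vlc.table, 9, 2);  /* 9-bit first-level table, at most 2 lookups */
if (code < 0)
    return AVERROR_INVALIDDATA;             /* no valid code at this bitstream position */
```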
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:89
c
In C, some operations (such as signed integer overflow, dereferencing freed memory, or accessing outside allocated bounds) are undefined behavior and must not occur; this is not safe even if the output of the undefined operation is unused, because optimizing compilers assume that no undefined behavior occurs.
Definition: undefined.txt:32
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:718
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1854
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:507
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1568
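As a hedged illustration of this internal helper: inside a decoder's frame callback, the output frame is typically requested from the user's buffer allocator once the stream geometry is known. This is only a sketch of the common pattern, not the exact sequence mjpegdec.c performs.
```c
/* Sketch (FFmpeg-internal API): request a reference-counted output frame. */
int ret;
frame->pict_type = AV_PICTURE_TYPE_I;
if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
    return ret;  /* allocation failed or was refused by the get_buffer2 callback */
```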
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:365
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1099
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:523
dc
Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:312
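A short sketch of the public frame-reference API: av_frame_ref() makes dst share the buffers of src by incrementing reference counts instead of copying pixel data, and av_frame_free() later drops only that extra reference.
```c
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* src is assumed to be a valid, filled AVFrame owned by the caller. */
AVFrame *dst = av_frame_alloc();
if (!dst)
    return AVERROR(ENOMEM);
int ret = av_frame_ref(dst, src);
if (ret < 0) {
    av_frame_free(&dst);
    return ret;
}
/* ... use dst ... */
av_frame_free(&dst);  /* releases the extra reference, src keeps its data */
```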
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:50
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2925
av_bswap32
#define av_bswap32
Definition: bswap.h:28
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:900
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1667
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:175
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:264
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:1818
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:521
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
VD
#define VD
Definition: av1dec.c:1536
offset
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it. vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:292
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2211
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:164
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
layout
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, the list of supported formats: for video that means pixel format, for audio that means channel layout.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:178
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2263
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:833
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
code
Test the status of outputs and forward it to the corresponding inputs, or return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some inputs, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1795
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:174
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
src2
const pixel * src2
Definition: h264pred_template.c:422
display.h
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:202
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1842
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:74
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:169
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:52
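A minimal sketch of the reallocation pattern this helper supports, assuming hypothetical context fields buffer/buffer_size: the scratch buffer grows only when needed and always carries AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes at the end so bitstream readers may overread safely.
```c
/* ctx->buffer (uint8_t *) and ctx->buffer_size (unsigned) are hypothetical fields. */
av_fast_padded_malloc(&ctx->buffer, &ctx->buffer_size, pkt->size);
if (!ctx->buffer)
    return AVERROR(ENOMEM);  /* on failure the previous buffer has been freed */
memcpy(ctx->buffer, pkt->data, pkt->size);
```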
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it. vf default value
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1404
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:534
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:702
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:599
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:666
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:2030
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:973
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:577
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:29
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:177
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
pos
unsigned int pos
Definition: spdifenc.c:413
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1401
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:364
left
the median of the scaled left, top and top-right vectors is used as motion vector prediction
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2236
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
name is the option name, keep it simple and lowercase; description describes what the option does, for example "set the foo of the bar"; offset is the offset of the field in your context structure, see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:412
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:293
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:259
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:705
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:171
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1639
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:658
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:166
values
Return values
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1396
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:301
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:633
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1854
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
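To connect the stereo-3D entries listed on this page, a small sketch using the public API: the helper attaches an AVStereo3D side-data block to a frame, after which the packing type and flags (for example AV_STEREO3D_TOPBOTTOM and AV_STEREO3D_FLAG_INVERT) can be filled in.
```c
#include <libavutil/error.h>
#include <libavutil/stereo3d.h>

AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
if (!stereo)
    return AVERROR(ENOMEM);
stereo->type   = AV_STEREO3D_TOPBOTTOM;    /* views stacked vertically */
stereo->flags |= AV_STEREO3D_FLAG_INVERT;  /* bottom view represents the left eye */
```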
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
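A brief sketch of the PutBitContext pattern this entry belongs to (internal header, usable inside the FFmpeg tree): bits are accumulated with put_bits() and flush_put_bits() zero-pads the stream so the last partially filled byte is written out.
```c
#include "put_bits.h"  /* FFmpeg-internal header */

uint8_t buf[64];
PutBitContext pb;

init_put_bits(&pb, buf, sizeof(buf));
put_bits(&pb, 16, 0xFFD8);  /* e.g. a JPEG SOI marker */
put_bits(&pb, 3, 5);        /* three more bits */
flush_put_bits(&pb);        /* zero-pad to a byte boundary */
/* put_bits_count(&pb) / 8 bytes of buf are now valid */
```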
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
ff_mjpeg_bits_dc_luminance
const FF_VISIBILITY_PUSH_HIDDEN uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:499
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
AVFrame::crop_top
size_t crop_top
Definition: frame.h:717
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
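A short sketch of the public dictionary API referenced here: av_dict_copy() merges the entries of src into dst (allocating dst when it is NULL), and av_dict_free() releases each dictionary afterwards.
```c
#include <libavutil/dict.h>

AVDictionary *src = NULL, *dst = NULL;

av_dict_set(&src, "comment", "example", 0);
if (av_dict_copy(&dst, src, 0) < 0) {  /* 0 = default flags; negative return on failure */
    /* handle allocation failure */
}
av_dict_free(&src);
av_dict_free(&dst);  /* safe even if the copy failed part-way */
```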
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
AVDictionaryEntry::value
char * value
Definition: dict.h:91
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
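A minimal sketch of the dimension validation mentioned above: the check returns 0 when a width x height image is addressable and a negative AVERROR code otherwise, so decoders typically call it on parsed dimensions before allocating a frame.
```c
#include <libavutil/imgutils.h>

/* width and height are assumed to come from a parsed SOF header. */
int ret = av_image_check_size(width, height, 0, avctx);  /* avctx only used for logging */
if (ret < 0)
    return ret;  /* dimensions invalid or too large to address */
```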
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:173
AV_RB24
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_RB24
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:375
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:345