mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/display.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/avassert.h"
36 #include "libavutil/opt.h"
37 #include "avcodec.h"
38 #include "blockdsp.h"
39 #include "copy_block.h"
40 #include "decode.h"
41 #include "hwconfig.h"
42 #include "idctdsp.h"
43 #include "internal.h"
44 #include "jpegtables.h"
45 #include "mjpeg.h"
46 #include "mjpegdec.h"
47 #include "jpeglsdec.h"
48 #include "profiles.h"
49 #include "put_bits.h"
50 #include "tiff.h"
51 #include "exif.h"
52 #include "bytestream.h"
53 
54 
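/* Install the default Huffman tables (Annex K of the JPEG standard): DC and AC
   tables for luma and chroma, plus class-2 copies of the AC tables used by the
   progressive decoder. */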
55 static int init_default_huffman_tables(MJpegDecodeContext *s)
56 {
57  static const struct {
58  int class;
59  int index;
60  const uint8_t *bits;
61  const uint8_t *values;
62  int length;
 63  } ht[] = {
 64  { 0, 0, ff_mjpeg_bits_dc_luminance,
 65  ff_mjpeg_val_dc, 12 },
 66  { 0, 1, ff_mjpeg_bits_dc_chrominance,
 67  ff_mjpeg_val_dc, 12 },
 68  { 1, 0, ff_mjpeg_bits_ac_luminance,
 69  ff_mjpeg_val_ac_luminance, 162 },
 70  { 1, 1, ff_mjpeg_bits_ac_chrominance,
 71  ff_mjpeg_val_ac_chrominance, 162 },
 72  { 2, 0, ff_mjpeg_bits_ac_luminance,
 73  ff_mjpeg_val_ac_luminance, 162 },
 74  { 2, 1, ff_mjpeg_bits_ac_chrominance,
 75  ff_mjpeg_val_ac_chrominance, 162 },
 76  };
77  int i, ret;
78 
79  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
80  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
81  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
82  ht[i].bits, ht[i].values,
83  ht[i].class == 1, s->avctx);
84  if (ret < 0)
85  return ret;
86 
87  if (ht[i].class < 2) {
88  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
89  ht[i].bits + 1, 16);
90  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
91  ht[i].values, ht[i].length);
92  }
93  }
94 
95  return 0;
96 }
97 
98 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
99 {
100  s->buggy_avid = 1;
101  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
102  s->interlace_polarity = 1;
103  if (len > 14 && buf[12] == 2) /* 2 - PAL */
104  s->interlace_polarity = 0;
105  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
106  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
107 }
108 
109 static void init_idct(AVCodecContext *avctx)
110 {
111  MJpegDecodeContext *s = avctx->priv_data;
112 
113  ff_idctdsp_init(&s->idsp, avctx);
114  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
 115  ff_zigzag_direct);
 116 }
117 
 118 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
 119 {
120  MJpegDecodeContext *s = avctx->priv_data;
121  int ret;
122 
123  if (!s->picture_ptr) {
124  s->picture = av_frame_alloc();
125  if (!s->picture)
126  return AVERROR(ENOMEM);
127  s->picture_ptr = s->picture;
128  }
129 
130  s->pkt = avctx->internal->in_pkt;
131 
132  s->avctx = avctx;
133  ff_blockdsp_init(&s->bdsp, avctx);
134  ff_hpeldsp_init(&s->hdsp, avctx->flags);
135  init_idct(avctx);
136  s->buffer_size = 0;
137  s->buffer = NULL;
138  s->start_code = -1;
139  s->first_picture = 1;
140  s->got_picture = 0;
141  s->orig_height = avctx->coded_height;
 142  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
 143  avctx->colorspace = AVCOL_SPC_BT470BG;
144  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
145 
146  if ((ret = init_default_huffman_tables(s)) < 0)
147  return ret;
148 
149  if (s->extern_huff) {
150  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
151  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
152  return ret;
153  if (ff_mjpeg_decode_dht(s)) {
154  av_log(avctx, AV_LOG_ERROR,
155  "error using external huffman table, switching back to internal\n");
156  if ((ret = init_default_huffman_tables(s)) < 0)
157  return ret;
158  }
159  }
160  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
161  s->interlace_polarity = 1; /* bottom field first */
162  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
163  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
164  if (avctx->codec_tag == AV_RL32("MJPG"))
165  s->interlace_polarity = 1;
166  }
167 
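 /* SMV JPEG stacks several sub-frames vertically inside one coded JPEG; the
    first 4 bytes of extradata give the number of frames per JPEG, used later
    to derive the per-frame height. */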
168  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
169  if (avctx->extradata_size >= 4)
170  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
171 
172  if (s->smv_frames_per_jpeg <= 0) {
173  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
174  return AVERROR_INVALIDDATA;
175  }
176 
177  s->smv_frame = av_frame_alloc();
178  if (!s->smv_frame)
179  return AVERROR(ENOMEM);
180  } else if (avctx->extradata_size > 8
181  && AV_RL32(avctx->extradata) == 0x2C
182  && AV_RL32(avctx->extradata+4) == 0x18) {
183  parse_avid(s, avctx->extradata, avctx->extradata_size);
184  }
185 
186  if (avctx->codec->id == AV_CODEC_ID_AMV)
187  s->flipped = 1;
188 
189  return 0;
190 }
191 
192 
193 /* quantize tables */
 194 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
 195 {
196  int len, index, i;
197 
198  len = get_bits(&s->gb, 16) - 2;
199 
200  if (8*len > get_bits_left(&s->gb)) {
201  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
202  return AVERROR_INVALIDDATA;
203  }
204 
205  while (len >= 65) {
206  int pr = get_bits(&s->gb, 4);
207  if (pr > 1) {
208  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
209  return AVERROR_INVALIDDATA;
210  }
211  index = get_bits(&s->gb, 4);
212  if (index >= 4)
213  return -1;
214  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
215  /* read quant table */
216  for (i = 0; i < 64; i++) {
217  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
218  if (s->quant_matrixes[index][i] == 0) {
219  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
220  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
221  if (s->avctx->err_recognition & AV_EF_EXPLODE)
222  return AVERROR_INVALIDDATA;
223  }
224  }
225 
226  // XXX FIXME fine-tune, and perhaps add dc too
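 // (the qscale guess comes from two low-frequency AC entries of the table and
 //  seems to serve only as a rough quality estimate, e.g. for the exported
 //  frame quality, not for dequantization)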
227  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
228  s->quant_matrixes[index][8]) >> 1;
229  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
230  index, s->qscale[index]);
231  len -= 1 + 64 * (1+pr);
232  }
233  return 0;
234 }
235 
236 /* decode huffman tables and build VLC decoders */
 237 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
 238 {
239  int len, index, i, class, n, v;
240  uint8_t bits_table[17];
241  uint8_t val_table[256];
242  int ret = 0;
243 
244  len = get_bits(&s->gb, 16) - 2;
245 
246  if (8*len > get_bits_left(&s->gb)) {
247  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
248  return AVERROR_INVALIDDATA;
249  }
250 
251  while (len > 0) {
252  if (len < 17)
253  return AVERROR_INVALIDDATA;
254  class = get_bits(&s->gb, 4);
255  if (class >= 2)
256  return AVERROR_INVALIDDATA;
257  index = get_bits(&s->gb, 4);
258  if (index >= 4)
259  return AVERROR_INVALIDDATA;
260  n = 0;
261  for (i = 1; i <= 16; i++) {
262  bits_table[i] = get_bits(&s->gb, 8);
263  n += bits_table[i];
264  }
265  len -= 17;
266  if (len < n || n > 256)
267  return AVERROR_INVALIDDATA;
268 
269  for (i = 0; i < n; i++) {
270  v = get_bits(&s->gb, 8);
271  val_table[i] = v;
272  }
273  len -= n;
274 
275  /* build VLC and flush previous vlc if present */
276  ff_free_vlc(&s->vlcs[class][index]);
277  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
278  class, index, n);
279  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
280  val_table, class > 0, s->avctx)) < 0)
281  return ret;
282 
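 /* Besides the class-1 table used by sequential scans, keep a copy of each AC
    table in class 2 for the progressive decoder (decode_block_progressive /
    decode_block_refinement); it is built with the is_ac argument set to 0. */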
283  if (class > 0) {
284  ff_free_vlc(&s->vlcs[2][index]);
285  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
286  val_table, 0, s->avctx)) < 0)
287  return ret;
288  }
289 
290  for (i = 0; i < 16; i++)
291  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
292  for (i = 0; i < 256; i++)
293  s->raw_huffman_values[class][index][i] = val_table[i];
294  }
295  return 0;
296 }
297 
 298 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 299 {
300  int len, nb_components, i, width, height, bits, ret, size_change;
301  unsigned pix_fmt_id;
302  int h_count[MAX_COMPONENTS] = { 0 };
303  int v_count[MAX_COMPONENTS] = { 0 };
304 
305  s->cur_scan = 0;
306  memset(s->upscale_h, 0, sizeof(s->upscale_h));
307  memset(s->upscale_v, 0, sizeof(s->upscale_v));
308 
309  len = get_bits(&s->gb, 16);
310  bits = get_bits(&s->gb, 8);
311 
312  if (bits > 16 || bits < 1) {
313  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
314  return AVERROR_INVALIDDATA;
315  }
316 
317  if (s->avctx->bits_per_raw_sample != bits) {
318  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
319  s->avctx->bits_per_raw_sample = bits;
320  init_idct(s->avctx);
321  }
322  if (s->pegasus_rct)
323  bits = 9;
324  if (bits == 9 && !s->pegasus_rct)
325  s->rct = 1; // FIXME ugly
326 
327  if(s->lossless && s->avctx->lowres){
328  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
329  return -1;
330  }
331 
332  height = get_bits(&s->gb, 16);
333  width = get_bits(&s->gb, 16);
334 
335  // HACK for odd_height.mov
336  if (s->interlaced && s->width == width && s->height == height + 1)
337  height= s->height;
338 
339  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
340  if (av_image_check_size(width, height, 0, s->avctx) < 0)
341  return AVERROR_INVALIDDATA;
342  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
343  return AVERROR_INVALIDDATA;
344 
345  nb_components = get_bits(&s->gb, 8);
346  if (nb_components <= 0 ||
347  nb_components > MAX_COMPONENTS)
348  return -1;
349  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
350  if (nb_components != s->nb_components) {
351  av_log(s->avctx, AV_LOG_ERROR,
352  "nb_components changing in interlaced picture\n");
353  return AVERROR_INVALIDDATA;
354  }
355  }
356  if (s->ls && !(bits <= 8 || nb_components == 1)) {
 357  avpriv_report_missing_feature(s->avctx,
 358  "JPEG-LS that is not <= 8 "
359  "bits/component or 16-bit gray");
360  return AVERROR_PATCHWELCOME;
361  }
362  if (len != 8 + 3 * nb_components) {
363  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
364  return AVERROR_INVALIDDATA;
365  }
366 
367  s->nb_components = nb_components;
368  s->h_max = 1;
369  s->v_max = 1;
370  for (i = 0; i < nb_components; i++) {
371  /* component id */
372  s->component_id[i] = get_bits(&s->gb, 8) - 1;
373  h_count[i] = get_bits(&s->gb, 4);
374  v_count[i] = get_bits(&s->gb, 4);
375  /* compute hmax and vmax (only used in interleaved case) */
376  if (h_count[i] > s->h_max)
377  s->h_max = h_count[i];
378  if (v_count[i] > s->v_max)
379  s->v_max = v_count[i];
380  s->quant_index[i] = get_bits(&s->gb, 8);
381  if (s->quant_index[i] >= 4) {
382  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
383  return AVERROR_INVALIDDATA;
384  }
385  if (!h_count[i] || !v_count[i]) {
386  av_log(s->avctx, AV_LOG_ERROR,
387  "Invalid sampling factor in component %d %d:%d\n",
388  i, h_count[i], v_count[i]);
389  return AVERROR_INVALIDDATA;
390  }
391 
392  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
393  i, h_count[i], v_count[i],
394  s->component_id[i], s->quant_index[i]);
395  }
396  if ( nb_components == 4
397  && s->component_id[0] == 'C' - 1
398  && s->component_id[1] == 'M' - 1
399  && s->component_id[2] == 'Y' - 1
400  && s->component_id[3] == 'K' - 1)
401  s->adobe_transform = 0;
402 
403  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
404  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
405  return AVERROR_PATCHWELCOME;
406  }
407 
408  if (s->bayer) {
409  if (nb_components == 2) {
410  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
411  width stored in their SOF3 markers is the width of each one. We only output
412  a single component, therefore we need to adjust the output image width. We
413  handle the deinterleaving (but not the debayering) in this file. */
414  width *= 2;
415  }
416  /* They can also contain 1 component, which is double the width and half the height
417  of the final image (rows are interleaved). We don't handle the decoding in this
418  file, but leave that to the TIFF/DNG decoder. */
419  }
420 
421  /* if different size, realloc/alloc picture */
422  if (width != s->width || height != s->height || bits != s->bits ||
423  memcmp(s->h_count, h_count, sizeof(h_count)) ||
424  memcmp(s->v_count, v_count, sizeof(v_count))) {
425  size_change = 1;
426 
427  s->width = width;
428  s->height = height;
429  s->bits = bits;
430  memcpy(s->h_count, h_count, sizeof(h_count));
431  memcpy(s->v_count, v_count, sizeof(v_count));
432  s->interlaced = 0;
433  s->got_picture = 0;
434 
435  /* test interlaced mode */
436  if (s->first_picture &&
437  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
438  s->orig_height != 0 &&
439  s->height < ((s->orig_height * 3) / 4)) {
440  s->interlaced = 1;
441  s->bottom_field = s->interlace_polarity;
442  s->picture_ptr->interlaced_frame = 1;
443  s->picture_ptr->top_field_first = !s->interlace_polarity;
444  height *= 2;
445  }
446 
447  ret = ff_set_dimensions(s->avctx, width, height);
448  if (ret < 0)
449  return ret;
450 
451  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
452  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
453  s->orig_height < height)
454  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
455 
456  s->first_picture = 0;
457  } else {
458  size_change = 0;
459  }
460 
461  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
462  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
463  if (s->avctx->height <= 0)
464  return AVERROR_INVALIDDATA;
465  }
466 
467  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
468  if (s->progressive) {
469  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
470  return AVERROR_INVALIDDATA;
471  }
472  } else {
473  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
474  s->rgb = 1;
475  else if (!s->lossless)
476  s->rgb = 0;
 477  /* XXX: not a complete test! */
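 /* pix_fmt_id packs the sampling factors of all components into nibbles:
    h_count[i] in the high and v_count[i] in the low nibble of each byte,
    component 0 in the top byte. E.g. 0x22111100 is 2x2 luma plus two 1x1
    chroma components, i.e. 4:2:0. */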
478  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
479  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
480  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
481  (s->h_count[3] << 4) | s->v_count[3];
482  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
483  /* NOTE we do not allocate pictures large enough for the possible
484  * padding of h/v_count being 4 */
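 /* If every horizontal (or every vertical) sampling factor is 0 or 2, halve
    them, so equivalent layouts (e.g. 0x22222200 and 0x11111100) select the
    same case in the switch below. */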
485  if (!(pix_fmt_id & 0xD0D0D0D0))
486  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
487  if (!(pix_fmt_id & 0x0D0D0D0D))
488  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
489 
490  for (i = 0; i < 8; i++) {
491  int j = 6 + (i&1) - (i&6);
492  int is = (pix_fmt_id >> (4*i)) & 0xF;
493  int js = (pix_fmt_id >> (4*j)) & 0xF;
494 
495  if (is == 1 && js != 2 && (i < 2 || i > 5))
496  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
497  if (is == 1 && js != 2 && (i < 2 || i > 5))
498  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
499 
500  if (is == 1 && js == 2) {
501  if (i & 1) s->upscale_h[j/2] = 1;
502  else s->upscale_v[j/2] = 1;
503  }
504  }
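 /* The loop above compares paired sampling factors and marks components coded
    at half resolution (factor 1 against a matching factor of 2) in
    upscale_h/upscale_v, so those planes can be doubled after decoding. */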
505 
506  if (s->bayer) {
507  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
508  goto unk_pixfmt;
509  }
510 
511  switch (pix_fmt_id) {
512  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
513  if (!s->bayer)
514  goto unk_pixfmt;
515  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
516  break;
517  case 0x11111100:
518  if (s->rgb)
519  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
520  else {
521  if ( s->adobe_transform == 0
522  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
523  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
524  } else {
525  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
526  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
527  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
528  }
529  }
530  av_assert0(s->nb_components == 3);
531  break;
532  case 0x11111111:
533  if (s->rgb)
534  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
535  else {
536  if (s->adobe_transform == 0 && s->bits <= 8) {
537  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
538  } else {
539  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
540  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
541  }
542  }
543  av_assert0(s->nb_components == 4);
544  break;
545  case 0x22111122:
546  case 0x22111111:
547  if (s->adobe_transform == 0 && s->bits <= 8) {
548  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
549  s->upscale_v[1] = s->upscale_v[2] = 1;
550  s->upscale_h[1] = s->upscale_h[2] = 1;
551  } else if (s->adobe_transform == 2 && s->bits <= 8) {
552  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
553  s->upscale_v[1] = s->upscale_v[2] = 1;
554  s->upscale_h[1] = s->upscale_h[2] = 1;
555  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
556  } else {
557  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
558  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
559  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
560  }
561  av_assert0(s->nb_components == 4);
562  break;
563  case 0x12121100:
564  case 0x22122100:
565  case 0x21211100:
566  case 0x21112100:
567  case 0x22211200:
568  case 0x22221100:
569  case 0x22112200:
570  case 0x11222200:
571  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
572  else
573  goto unk_pixfmt;
574  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
575  break;
576  case 0x11000000:
577  case 0x13000000:
578  case 0x14000000:
579  case 0x31000000:
580  case 0x33000000:
581  case 0x34000000:
582  case 0x41000000:
583  case 0x43000000:
584  case 0x44000000:
585  if(s->bits <= 8)
586  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
587  else
588  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
589  break;
590  case 0x12111100:
591  case 0x14121200:
592  case 0x14111100:
593  case 0x22211100:
594  case 0x22112100:
595  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
596  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
597  else
598  goto unk_pixfmt;
599  s->upscale_v[0] = s->upscale_v[1] = 1;
600  } else {
601  if (pix_fmt_id == 0x14111100)
602  s->upscale_v[1] = s->upscale_v[2] = 1;
603  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
604  else
605  goto unk_pixfmt;
606  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
607  }
608  break;
609  case 0x21111100:
610  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
611  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
612  else
613  goto unk_pixfmt;
614  s->upscale_h[0] = s->upscale_h[1] = 1;
615  } else {
616  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
617  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
618  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
619  }
620  break;
621  case 0x31111100:
622  if (s->bits > 8)
623  goto unk_pixfmt;
624  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
625  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
626  s->upscale_h[1] = s->upscale_h[2] = 2;
627  break;
628  case 0x22121100:
629  case 0x22111200:
630  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
631  else
632  goto unk_pixfmt;
633  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
634  break;
635  case 0x22111100:
636  case 0x23111100:
637  case 0x42111100:
638  case 0x24111100:
639  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
640  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
641  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
642  if (pix_fmt_id == 0x42111100) {
643  if (s->bits > 8)
644  goto unk_pixfmt;
645  s->upscale_h[1] = s->upscale_h[2] = 1;
646  } else if (pix_fmt_id == 0x24111100) {
647  if (s->bits > 8)
648  goto unk_pixfmt;
649  s->upscale_v[1] = s->upscale_v[2] = 1;
650  } else if (pix_fmt_id == 0x23111100) {
651  if (s->bits > 8)
652  goto unk_pixfmt;
653  s->upscale_v[1] = s->upscale_v[2] = 2;
654  }
655  break;
656  case 0x41111100:
657  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
658  else
659  goto unk_pixfmt;
660  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
661  break;
662  default:
663  unk_pixfmt:
664  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
665  memset(s->upscale_h, 0, sizeof(s->upscale_h));
666  memset(s->upscale_v, 0, sizeof(s->upscale_v));
667  return AVERROR_PATCHWELCOME;
668  }
669  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
670  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
671  return AVERROR_PATCHWELCOME;
672  }
673  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
674  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
675  return AVERROR_PATCHWELCOME;
676  }
677  if (s->ls) {
678  memset(s->upscale_h, 0, sizeof(s->upscale_h));
679  memset(s->upscale_v, 0, sizeof(s->upscale_v));
680  if (s->nb_components == 3) {
681  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
682  } else if (s->nb_components != 1) {
683  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
684  return AVERROR_PATCHWELCOME;
685  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
686  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
687  else if (s->bits <= 8)
688  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
689  else
690  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
691  }
692 
693  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
694  if (!s->pix_desc) {
695  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
696  return AVERROR_BUG;
697  }
698 
699  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
700  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
701  } else {
702  enum AVPixelFormat pix_fmts[] = {
703 #if CONFIG_MJPEG_NVDEC_HWACCEL
 704  AV_PIX_FMT_CUDA,
 705 #endif
 706 #if CONFIG_MJPEG_VAAPI_HWACCEL
 707  AV_PIX_FMT_VAAPI,
 708 #endif
 709  s->avctx->pix_fmt,
 710  AV_PIX_FMT_NONE,
 711  };
712  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
713  if (s->hwaccel_pix_fmt < 0)
714  return AVERROR(EINVAL);
715 
716  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
717  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
718  }
719 
720  if (s->avctx->skip_frame == AVDISCARD_ALL) {
721  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
722  s->picture_ptr->key_frame = 1;
723  s->got_picture = 1;
724  return 0;
725  }
726 
727  av_frame_unref(s->picture_ptr);
728  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
729  return -1;
730  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
731  s->picture_ptr->key_frame = 1;
732  s->got_picture = 1;
733 
 734  // Let's clear the palette to avoid leaving uninitialized values in it
735  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
736  memset(s->picture_ptr->data[1], 0, 1024);
737 
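 /* When decoding interlaced content the two fields share one frame buffer, so
    each field uses twice the frame linesize; the bottom field starts one frame
    line down. */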
738  for (i = 0; i < 4; i++)
739  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
740 
741  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
742  s->width, s->height, s->linesize[0], s->linesize[1],
743  s->interlaced, s->avctx->height);
744 
745  }
746 
747  if ((s->rgb && !s->lossless && !s->ls) ||
748  (!s->rgb && s->ls && s->nb_components > 1) ||
749  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
750  ) {
751  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
752  return AVERROR_PATCHWELCOME;
753  }
754 
 755  /* start from a totally blank picture, as progressive JPEG only adds detail to it */
756  if (s->progressive) {
757  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
758  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
759  for (i = 0; i < s->nb_components; i++) {
760  int size = bw * bh * s->h_count[i] * s->v_count[i];
761  av_freep(&s->blocks[i]);
762  av_freep(&s->last_nnz[i]);
763  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
764  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
765  if (!s->blocks[i] || !s->last_nnz[i])
766  return AVERROR(ENOMEM);
767  s->block_stride[i] = bw * s->h_count[i];
768  }
769  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
770  }
771 
772  if (s->avctx->hwaccel) {
773  s->hwaccel_picture_private =
774  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
775  if (!s->hwaccel_picture_private)
776  return AVERROR(ENOMEM);
777 
778  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
779  s->raw_image_buffer_size);
780  if (ret < 0)
781  return ret;
782  }
783 
784  return 0;
785 }
786 
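 /* Decode a DC difference: the Huffman symbol gives the magnitude category
    (the number of additional bits to read); 0xfffff is used as an error
    sentinel since no valid category can produce it. */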
787 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
788 {
789  int code;
790  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
791  if (code < 0 || code > 16) {
792  av_log(s->avctx, AV_LOG_WARNING,
793  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
794  0, dc_index, &s->vlcs[0][dc_index]);
795  return 0xfffff;
796  }
797 
798  if (code)
799  return get_xbits(&s->gb, code);
800  else
801  return 0;
802 }
803 
804 /* decode block and dequantize */
805 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
806  int dc_index, int ac_index, uint16_t *quant_matrix)
807 {
808  int code, i, j, level, val;
809 
810  /* DC coef */
811  val = mjpeg_decode_dc(s, dc_index);
812  if (val == 0xfffff) {
813  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
814  return AVERROR_INVALIDDATA;
815  }
816  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
817  val = av_clip_int16(val);
818  s->last_dc[component] = val;
819  block[0] = val;
820  /* AC coefs */
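 /* Each AC symbol packs a run of zero coefficients in the high nibble and the
    magnitude category in the low nibble. The sign/cache trick below is an
    inlined get_xbits(): it reads 'code' extra bits and sign-extends them into
    the coefficient level. */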
821  i = 0;
822  {OPEN_READER(re, &s->gb);
823  do {
824  UPDATE_CACHE(re, &s->gb);
825  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
826 
827  i += ((unsigned)code) >> 4;
828  code &= 0xf;
829  if (code) {
830  if (code > MIN_CACHE_BITS - 16)
831  UPDATE_CACHE(re, &s->gb);
832 
833  {
834  int cache = GET_CACHE(re, &s->gb);
835  int sign = (~cache) >> 31;
836  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
837  }
838 
839  LAST_SKIP_BITS(re, &s->gb, code);
840 
841  if (i > 63) {
842  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
843  return AVERROR_INVALIDDATA;
844  }
845  j = s->scantable.permutated[i];
846  block[j] = level * quant_matrix[i];
847  }
848  } while (i < 63);
849  CLOSE_READER(re, &s->gb);}
850 
851  return 0;
852 }
853 
 854 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
 855  int component, int dc_index,
856  uint16_t *quant_matrix, int Al)
857 {
858  unsigned val;
859  s->bdsp.clear_block(block);
860  val = mjpeg_decode_dc(s, dc_index);
861  if (val == 0xfffff) {
862  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
863  return AVERROR_INVALIDDATA;
864  }
865  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
866  s->last_dc[component] = val;
867  block[0] = val;
868  return 0;
869 }
870 
871 /* decode block and dequantize - progressive JPEG version */
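 /* A progressive AC scan only covers the spectral band ss..se, with
    coefficients scaled by 2^Al. An end-of-band symbol with run r means
    2^r plus extra bits blocks (including the current one) contain no further
    coefficients in this band; the remaining count is kept in *EOBRUN. */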
 872 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
 873  uint8_t *last_nnz, int ac_index,
874  uint16_t *quant_matrix,
875  int ss, int se, int Al, int *EOBRUN)
876 {
877  int code, i, j, val, run;
878  unsigned level;
879 
880  if (*EOBRUN) {
881  (*EOBRUN)--;
882  return 0;
883  }
884 
885  {
886  OPEN_READER(re, &s->gb);
887  for (i = ss; ; i++) {
888  UPDATE_CACHE(re, &s->gb);
889  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
890 
891  run = ((unsigned) code) >> 4;
892  code &= 0xF;
893  if (code) {
894  i += run;
895  if (code > MIN_CACHE_BITS - 16)
896  UPDATE_CACHE(re, &s->gb);
897 
898  {
899  int cache = GET_CACHE(re, &s->gb);
900  int sign = (~cache) >> 31;
901  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
902  }
903 
904  LAST_SKIP_BITS(re, &s->gb, code);
905 
906  if (i >= se) {
907  if (i == se) {
908  j = s->scantable.permutated[se];
909  block[j] = level * (quant_matrix[se] << Al);
910  break;
911  }
912  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
913  return AVERROR_INVALIDDATA;
914  }
915  j = s->scantable.permutated[i];
916  block[j] = level * (quant_matrix[i] << Al);
917  } else {
918  if (run == 0xF) {// ZRL - skip 15 coefficients
919  i += 15;
920  if (i >= se) {
921  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
922  return AVERROR_INVALIDDATA;
923  }
924  } else {
925  val = (1 << run);
926  if (run) {
927  UPDATE_CACHE(re, &s->gb);
928  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
929  LAST_SKIP_BITS(re, &s->gb, run);
930  }
931  *EOBRUN = val - 1;
932  break;
933  }
934  }
935  }
936  CLOSE_READER(re, &s->gb);
937  }
938 
939  if (i > *last_nnz)
940  *last_nnz = i;
941 
942  return 0;
943 }
944 
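 /* Refinement (successive approximation) pass helpers: REFINE_BIT reads one
    bit and, if set, grows an already non-zero coefficient by one quantization
    step (scaled by 2^Al) in the direction of its sign; ZERO_RUN walks the
    band, refining non-zero coefficients and decrementing the zero-run only on
    zero ones. */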
945 #define REFINE_BIT(j) { \
946  UPDATE_CACHE(re, &s->gb); \
947  sign = block[j] >> 15; \
948  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
949  ((quant_matrix[i] ^ sign) - sign) << Al; \
950  LAST_SKIP_BITS(re, &s->gb, 1); \
951 }
952 
953 #define ZERO_RUN \
954 for (; ; i++) { \
955  if (i > last) { \
956  i += run; \
957  if (i > se) { \
958  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
959  return -1; \
960  } \
961  break; \
962  } \
963  j = s->scantable.permutated[i]; \
964  if (block[j]) \
965  REFINE_BIT(j) \
966  else if (run-- == 0) \
967  break; \
968 }
969 
970 /* decode block and dequantize - progressive JPEG refinement pass */
 971 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
 972  uint8_t *last_nnz,
973  int ac_index, uint16_t *quant_matrix,
974  int ss, int se, int Al, int *EOBRUN)
975 {
976  int code, i = ss, j, sign, val, run;
977  int last = FFMIN(se, *last_nnz);
978 
979  OPEN_READER(re, &s->gb);
980  if (*EOBRUN) {
981  (*EOBRUN)--;
982  } else {
983  for (; ; i++) {
984  UPDATE_CACHE(re, &s->gb);
985  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
986 
987  if (code & 0xF) {
988  run = ((unsigned) code) >> 4;
989  UPDATE_CACHE(re, &s->gb);
990  val = SHOW_UBITS(re, &s->gb, 1);
991  LAST_SKIP_BITS(re, &s->gb, 1);
992  ZERO_RUN;
993  j = s->scantable.permutated[i];
994  val--;
995  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
996  if (i == se) {
997  if (i > *last_nnz)
998  *last_nnz = i;
999  CLOSE_READER(re, &s->gb);
1000  return 0;
1001  }
1002  } else {
1003  run = ((unsigned) code) >> 4;
1004  if (run == 0xF) {
1005  ZERO_RUN;
1006  } else {
1007  val = run;
1008  run = (1 << run);
1009  if (val) {
1010  UPDATE_CACHE(re, &s->gb);
1011  run += SHOW_UBITS(re, &s->gb, val);
1012  LAST_SKIP_BITS(re, &s->gb, val);
1013  }
1014  *EOBRUN = run - 1;
1015  break;
1016  }
1017  }
1018  }
1019 
1020  if (i > *last_nnz)
1021  *last_nnz = i;
1022  }
1023 
1024  for (; i <= last; i++) {
1025  j = s->scantable.permutated[i];
1026  if (block[j])
1027  REFINE_BIT(j)
1028  }
1029  CLOSE_READER(re, &s->gb);
1030 
1031  return 0;
1032 }
1033 #undef REFINE_BIT
1034 #undef ZERO_RUN
1035 
1036 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1037 {
1038  int i;
1039  int reset = 0;
1040 
1041  if (s->restart_interval) {
1042  s->restart_count--;
1043  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1044  align_get_bits(&s->gb);
1045  for (i = 0; i < nb_components; i++) /* reset dc */
1046  s->last_dc[i] = (4 << s->bits);
1047  }
1048 
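 /* Peek up to the next byte boundary plus one byte: if the remaining bits look
    like marker padding (all ones) or a 0xFF byte follows, realign, skip fill
    bytes and check for an RSTn marker (0xD0-0xD7); on resync the DC predictors
    are reset. */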
1049  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1050  /* skip RSTn */
1051  if (s->restart_count == 0) {
1052  if( show_bits(&s->gb, i) == (1 << i) - 1
1053  || show_bits(&s->gb, i) == 0xFF) {
1054  int pos = get_bits_count(&s->gb);
1055  align_get_bits(&s->gb);
1056  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1057  skip_bits(&s->gb, 8);
1058  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1059  for (i = 0; i < nb_components; i++) /* reset dc */
1060  s->last_dc[i] = (4 << s->bits);
1061  reset = 1;
1062  } else
1063  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1064  }
1065  }
1066  }
1067  return reset;
1068 }
1069 
1070 /* Handles 1 to 4 components */
1071 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1072 {
1073  int i, mb_x, mb_y;
1074  unsigned width;
1075  uint16_t (*buffer)[4];
1076  int left[4], top[4], topleft[4];
1077  const int linesize = s->linesize[0];
1078  const int mask = ((1 << s->bits) - 1) << point_transform;
1079  int resync_mb_y = 0;
1080  int resync_mb_x = 0;
1081  int vpred[6];
1082 
1083  if (!s->bayer && s->nb_components < 3)
1084  return AVERROR_INVALIDDATA;
1085  if (s->bayer && s->nb_components > 2)
1086  return AVERROR_INVALIDDATA;
1087  if (s->nb_components <= 0 || s->nb_components > 4)
1088  return AVERROR_INVALIDDATA;
1089  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1090  return AVERROR_INVALIDDATA;
1091 
1092 
1093  s->restart_count = s->restart_interval;
1094 
1095  if (s->restart_interval == 0)
1096  s->restart_interval = INT_MAX;
1097 
1098  if (s->bayer)
1099  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1100  else
1101  width = s->mb_width;
1102 
1103  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1104  if (!s->ljpeg_buffer)
1105  return AVERROR(ENOMEM);
1106 
1107  buffer = s->ljpeg_buffer;
1108 
1109  for (i = 0; i < 4; i++)
1110  buffer[0][i] = 1 << (s->bits - 1);
1111 
1112  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1113  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1114 
1115  if (s->interlaced && s->bottom_field)
1116  ptr += linesize >> 1;
1117 
1118  for (i = 0; i < 4; i++)
1119  top[i] = left[i] = topleft[i] = buffer[0][i];
1120 
1121  if ((mb_y * s->width) % s->restart_interval == 0) {
1122  for (i = 0; i < 6; i++)
1123  vpred[i] = 1 << (s->bits-1);
1124  }
1125 
1126  for (mb_x = 0; mb_x < width; mb_x++) {
1127  int modified_predictor = predictor;
1128 
1129  if (get_bits_left(&s->gb) < 1) {
1130  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1131  return AVERROR_INVALIDDATA;
1132  }
1133 
1134  if (s->restart_interval && !s->restart_count){
1135  s->restart_count = s->restart_interval;
1136  resync_mb_x = mb_x;
1137  resync_mb_y = mb_y;
1138  for(i=0; i<4; i++)
1139  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1140  }
1141  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1142  modified_predictor = 1;
1143 
1144  for (i=0;i<nb_components;i++) {
1145  int pred, dc;
1146 
1147  topleft[i] = top[i];
1148  top[i] = buffer[mb_x][i];
1149 
1150  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1151  if(dc == 0xFFFFF)
1152  return -1;
1153 
1154  if (!s->bayer || mb_x) {
1155  pred = left[i];
1156  } else { /* This path runs only for the first line in bayer images */
1157  vpred[i] += dc;
1158  pred = vpred[i] - dc;
1159  }
1160 
1161  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1162 
1163  left[i] = buffer[mb_x][i] =
1164  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1165  }
1166 
1167  if (s->restart_interval && !--s->restart_count) {
1168  align_get_bits(&s->gb);
1169  skip_bits(&s->gb, 16); /* skip RSTn */
1170  }
1171  }
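 /* Inverse reversible colour transform: G = Y - ((Cd1 + Cd2 - 0x200) >> 2),
    then the two difference channels are reconstructed by adding G back; the
    Pegasus (PRCT) variant further below omits the 0x200 bias. */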
1172  if (s->rct && s->nb_components == 4) {
1173  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1174  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1175  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1176  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1177  ptr[4*mb_x + 0] = buffer[mb_x][3];
1178  }
1179  } else if (s->nb_components == 4) {
1180  for(i=0; i<nb_components; i++) {
1181  int c= s->comp_index[i];
1182  if (s->bits <= 8) {
1183  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1184  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1185  }
1186  } else if(s->bits == 9) {
1187  return AVERROR_PATCHWELCOME;
1188  } else {
1189  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1190  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1191  }
1192  }
1193  }
1194  } else if (s->rct) {
1195  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1196  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1197  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1198  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1199  }
1200  } else if (s->pegasus_rct) {
1201  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1202  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1203  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1204  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1205  }
1206  } else if (s->bayer) {
1207  if (nb_components == 1) {
1208  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1209  for (mb_x = 0; mb_x < width; mb_x++)
1210  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1211  } else if (nb_components == 2) {
1212  for (mb_x = 0; mb_x < width; mb_x++) {
1213  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1214  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1215  }
1216  }
1217  } else {
1218  for(i=0; i<nb_components; i++) {
1219  int c= s->comp_index[i];
1220  if (s->bits <= 8) {
1221  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1222  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1223  }
1224  } else if(s->bits == 9) {
1225  return AVERROR_PATCHWELCOME;
1226  } else {
1227  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1228  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1229  }
1230  }
1231  }
1232  }
1233  }
1234  return 0;
1235 }
1236 
 1237 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
 1238  int point_transform, int nb_components)
1239 {
1240  int i, mb_x, mb_y, mask;
1241  int bits= (s->bits+7)&~7;
1242  int resync_mb_y = 0;
1243  int resync_mb_x = 0;
1244 
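 /* Samples live in 8- or 16-bit containers, so the point transform is raised
    by the container padding (bits - s->bits) and the mask keeps only the valid
    sample bits at that position. */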
1245  point_transform += bits - s->bits;
1246  mask = ((1 << s->bits) - 1) << point_transform;
1247 
1248  av_assert0(nb_components>=1 && nb_components<=4);
1249 
1250  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1251  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1252  if (get_bits_left(&s->gb) < 1) {
1253  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1254  return AVERROR_INVALIDDATA;
1255  }
1256  if (s->restart_interval && !s->restart_count){
1257  s->restart_count = s->restart_interval;
1258  resync_mb_x = mb_x;
1259  resync_mb_y = mb_y;
1260  }
1261 
1262  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1263  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1264  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1265  for (i = 0; i < nb_components; i++) {
1266  uint8_t *ptr;
1267  uint16_t *ptr16;
1268  int n, h, v, x, y, c, j, linesize;
1269  n = s->nb_blocks[i];
1270  c = s->comp_index[i];
1271  h = s->h_scount[i];
1272  v = s->v_scount[i];
1273  x = 0;
1274  y = 0;
1275  linesize= s->linesize[c];
1276 
1277  if(bits>8) linesize /= 2;
1278 
1279  for(j=0; j<n; j++) {
1280  int pred, dc;
1281 
1282  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1283  if(dc == 0xFFFFF)
1284  return -1;
1285  if ( h * mb_x + x >= s->width
1286  || v * mb_y + y >= s->height) {
1287  // Nothing to do
1288  } else if (bits<=8) {
1289  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1290  if(y==0 && toprow){
1291  if(x==0 && leftcol){
1292  pred= 1 << (bits - 1);
1293  }else{
1294  pred= ptr[-1];
1295  }
1296  }else{
1297  if(x==0 && leftcol){
1298  pred= ptr[-linesize];
1299  }else{
1300  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1301  }
1302  }
1303 
1304  if (s->interlaced && s->bottom_field)
1305  ptr += linesize >> 1;
1306  pred &= mask;
1307  *ptr= pred + ((unsigned)dc << point_transform);
1308  }else{
1309  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1310  if(y==0 && toprow){
1311  if(x==0 && leftcol){
1312  pred= 1 << (bits - 1);
1313  }else{
1314  pred= ptr16[-1];
1315  }
1316  }else{
1317  if(x==0 && leftcol){
1318  pred= ptr16[-linesize];
1319  }else{
1320  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1321  }
1322  }
1323 
1324  if (s->interlaced && s->bottom_field)
1325  ptr16 += linesize >> 1;
1326  pred &= mask;
1327  *ptr16= pred + ((unsigned)dc << point_transform);
1328  }
1329  if (++x == h) {
1330  x = 0;
1331  y++;
1332  }
1333  }
1334  }
1335  } else {
1336  for (i = 0; i < nb_components; i++) {
1337  uint8_t *ptr;
1338  uint16_t *ptr16;
1339  int n, h, v, x, y, c, j, linesize, dc;
1340  n = s->nb_blocks[i];
1341  c = s->comp_index[i];
1342  h = s->h_scount[i];
1343  v = s->v_scount[i];
1344  x = 0;
1345  y = 0;
1346  linesize = s->linesize[c];
1347 
1348  if(bits>8) linesize /= 2;
1349 
1350  for (j = 0; j < n; j++) {
1351  int pred;
1352 
1353  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1354  if(dc == 0xFFFFF)
1355  return -1;
1356  if ( h * mb_x + x >= s->width
1357  || v * mb_y + y >= s->height) {
1358  // Nothing to do
1359  } else if (bits<=8) {
1360  ptr = s->picture_ptr->data[c] +
1361  (linesize * (v * mb_y + y)) +
1362  (h * mb_x + x); //FIXME optimize this crap
1363  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1364 
1365  pred &= mask;
1366  *ptr = pred + ((unsigned)dc << point_transform);
1367  }else{
1368  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1369  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1370 
1371  pred &= mask;
1372  *ptr16= pred + ((unsigned)dc << point_transform);
1373  }
1374 
1375  if (++x == h) {
1376  x = 0;
1377  y++;
1378  }
1379  }
1380  }
1381  }
1382  if (s->restart_interval && !--s->restart_count) {
1383  align_get_bits(&s->gb);
1384  skip_bits(&s->gb, 16); /* skip RSTn */
1385  }
1386  }
1387  }
1388  return 0;
1389 }
1390 
 1391 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
 1392  uint8_t *dst, const uint8_t *src,
1393  int linesize, int lowres)
1394 {
1395  switch (lowres) {
1396  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1397  break;
1398  case 1: copy_block4(dst, src, linesize, linesize, 4);
1399  break;
1400  case 2: copy_block2(dst, src, linesize, linesize, 2);
1401  break;
1402  case 3: *dst = *src;
1403  break;
1404  }
1405 }
1406 
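 /* Scale decoded samples up to the full container range when the coded bit
    depth is not a multiple of 8, e.g. 12-bit samples are shifted left by 4 to
    fill 16-bit pixels. */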
1407 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1408 {
1409  int block_x, block_y;
1410  int size = 8 >> s->avctx->lowres;
1411  if (s->bits > 8) {
1412  for (block_y=0; block_y<size; block_y++)
1413  for (block_x=0; block_x<size; block_x++)
1414  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1415  } else {
1416  for (block_y=0; block_y<size; block_y++)
1417  for (block_x=0; block_x<size; block_x++)
1418  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1419  }
1420 }
1421 
1422 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1423  int Al, const uint8_t *mb_bitmask,
1424  int mb_bitmask_size,
1425  const AVFrame *reference)
1426 {
1427  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1428  uint8_t *data[MAX_COMPONENTS];
1429  const uint8_t *reference_data[MAX_COMPONENTS];
1430  int linesize[MAX_COMPONENTS];
1431  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1432  int bytes_per_pixel = 1 + (s->bits > 8);
1433 
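 /* With an mb_bitmask and a reference frame (as passed e.g. by the MxPEG
    decoder), macroblocks whose bit is 0 are copied from the reference instead
    of being decoded. */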
1434  if (mb_bitmask) {
1435  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1436  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1437  return AVERROR_INVALIDDATA;
1438  }
1439  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1440  }
1441 
1442  s->restart_count = 0;
1443 
1444  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1445  &chroma_v_shift);
1446  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1447  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1448 
1449  for (i = 0; i < nb_components; i++) {
1450  int c = s->comp_index[i];
1451  data[c] = s->picture_ptr->data[c];
1452  reference_data[c] = reference ? reference->data[c] : NULL;
1453  linesize[c] = s->linesize[c];
1454  s->coefs_finished[c] |= 1;
1455  }
1456 
1457  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1458  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1459  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1460 
1461  if (s->restart_interval && !s->restart_count)
1462  s->restart_count = s->restart_interval;
1463 
1464  if (get_bits_left(&s->gb) < 0) {
1465  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1466  -get_bits_left(&s->gb));
1467  return AVERROR_INVALIDDATA;
1468  }
1469  for (i = 0; i < nb_components; i++) {
1470  uint8_t *ptr;
1471  int n, h, v, x, y, c, j;
1472  int block_offset;
1473  n = s->nb_blocks[i];
1474  c = s->comp_index[i];
1475  h = s->h_scount[i];
1476  v = s->v_scount[i];
1477  x = 0;
1478  y = 0;
1479  for (j = 0; j < n; j++) {
1480  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1481  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1482 
1483  if (s->interlaced && s->bottom_field)
1484  block_offset += linesize[c] >> 1;
1485  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1486  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1487  ptr = data[c] + block_offset;
1488  } else
1489  ptr = NULL;
1490  if (!s->progressive) {
1491  if (copy_mb) {
1492  if (ptr)
1493  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1494  linesize[c], s->avctx->lowres);
1495 
1496  } else {
1497  s->bdsp.clear_block(s->block);
1498  if (decode_block(s, s->block, i,
1499  s->dc_index[i], s->ac_index[i],
1500  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1501  av_log(s->avctx, AV_LOG_ERROR,
1502  "error y=%d x=%d\n", mb_y, mb_x);
1503  return AVERROR_INVALIDDATA;
1504  }
1505  if (ptr) {
1506  s->idsp.idct_put(ptr, linesize[c], s->block);
1507  if (s->bits & 7)
1508  shift_output(s, ptr, linesize[c]);
1509  }
1510  }
1511  } else {
1512  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1513  (h * mb_x + x);
1514  int16_t *block = s->blocks[c][block_idx];
1515  if (Ah)
1516  block[0] += get_bits1(&s->gb) *
1517  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1518  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1519  s->quant_matrixes[s->quant_sindex[i]],
1520  Al) < 0) {
1521  av_log(s->avctx, AV_LOG_ERROR,
1522  "error y=%d x=%d\n", mb_y, mb_x);
1523  return AVERROR_INVALIDDATA;
1524  }
1525  }
1526  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1527  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1528  mb_x, mb_y, x, y, c, s->bottom_field,
1529  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1530  if (++x == h) {
1531  x = 0;
1532  y++;
1533  }
1534  }
1535  }
1536 
1537  handle_rstn(s, nb_components);
1538  }
1539  }
1540  return 0;
1541 }
1542 
 1543 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
 1544  int se, int Ah, int Al)
1545 {
1546  int mb_x, mb_y;
1547  int EOBRUN = 0;
1548  int c = s->comp_index[0];
1549  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1550 
1551  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1552  if (se < ss || se > 63) {
1553  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1554  return AVERROR_INVALIDDATA;
1555  }
1556 
1557  // s->coefs_finished is a bitmask for coefficients coded
1558  // ss and se are parameters telling start and end coefficients
1559  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
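 // e.g. ss=1, se=5: (2ULL << 5) - (1ULL << 1) = 62 = 0b111110, i.e. coefficients 1..5 marked as coded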
1560 
1561  s->restart_count = 0;
1562 
1563  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1564  int block_idx = mb_y * s->block_stride[c];
1565  int16_t (*block)[64] = &s->blocks[c][block_idx];
1566  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1567  if (get_bits_left(&s->gb) <= 0) {
1568  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1569  return AVERROR_INVALIDDATA;
1570  }
1571  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1572  int ret;
1573  if (s->restart_interval && !s->restart_count)
1574  s->restart_count = s->restart_interval;
1575 
1576  if (Ah)
1577  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1578  quant_matrix, ss, se, Al, &EOBRUN);
1579  else
1580  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1581  quant_matrix, ss, se, Al, &EOBRUN);
1582 
1583  if (ret >= 0 && get_bits_left(&s->gb) < 0)
 1584  ret = AVERROR_INVALIDDATA;
 1585  if (ret < 0) {
1586  av_log(s->avctx, AV_LOG_ERROR,
1587  "error y=%d x=%d\n", mb_y, mb_x);
1588  return AVERROR_INVALIDDATA;
1589  }
1590 
1591  if (handle_rstn(s, 0))
1592  EOBRUN = 0;
1593  }
1594  }
1595  return 0;
1596 }
1597 
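 /* After the final progressive pass, run the IDCT over the accumulated
    coefficient blocks of every component and write the pixels out. */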
 1598 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
 1599 {
1600  int mb_x, mb_y;
1601  int c;
1602  const int bytes_per_pixel = 1 + (s->bits > 8);
1603  const int block_size = s->lossless ? 1 : 8;
1604 
1605  for (c = 0; c < s->nb_components; c++) {
1606  uint8_t *data = s->picture_ptr->data[c];
1607  int linesize = s->linesize[c];
1608  int h = s->h_max / s->h_count[c];
1609  int v = s->v_max / s->v_count[c];
1610  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1611  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1612 
1613  if (~s->coefs_finished[c])
1614  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1615 
1616  if (s->interlaced && s->bottom_field)
1617  data += linesize >> 1;
1618 
1619  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1620  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1621  int block_idx = mb_y * s->block_stride[c];
1622  int16_t (*block)[64] = &s->blocks[c][block_idx];
1623  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1624  s->idsp.idct_put(ptr, linesize, *block);
1625  if (s->bits & 7)
1626  shift_output(s, ptr, linesize);
1627  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1628  }
1629  }
1630  }
1631 }
1632 
1633 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1634  int mb_bitmask_size, const AVFrame *reference)
1635 {
1636  int len, nb_components, i, h, v, predictor, point_transform;
1637  int index, id, ret;
1638  const int block_size = s->lossless ? 1 : 8;
1639  int ilv, prev_shift;
1640 
1641  if (!s->got_picture) {
1642  av_log(s->avctx, AV_LOG_WARNING,
1643  "Can not process SOS before SOF, skipping\n");
1644  return -1;
1645  }
1646 
1647  if (reference) {
1648  if (reference->width != s->picture_ptr->width ||
1649  reference->height != s->picture_ptr->height ||
1650  reference->format != s->picture_ptr->format) {
1651  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1652  return AVERROR_INVALIDDATA;
1653  }
1654  }
1655 
1656  /* XXX: verify len field validity */
1657  len = get_bits(&s->gb, 16);
1658  nb_components = get_bits(&s->gb, 8);
1659  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
 1660  avpriv_report_missing_feature(s->avctx,
 1661  "decode_sos: nb_components (%d)",
1662  nb_components);
1663  return AVERROR_PATCHWELCOME;
1664  }
1665  if (len != 6 + 2 * nb_components) {
1666  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1667  return AVERROR_INVALIDDATA;
1668  }
1669  for (i = 0; i < nb_components; i++) {
1670  id = get_bits(&s->gb, 8) - 1;
1671  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1672  /* find component index */
1673  for (index = 0; index < s->nb_components; index++)
1674  if (id == s->component_id[index])
1675  break;
1676  if (index == s->nb_components) {
1677  av_log(s->avctx, AV_LOG_ERROR,
1678  "decode_sos: index(%d) out of components\n", index);
1679  return AVERROR_INVALIDDATA;
1680  }
1681  /* Metasoft MJPEG codec has Cb and Cr swapped */
1682  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1683  && nb_components == 3 && s->nb_components == 3 && i)
1684  index = 3 - i;
1685 
1686  s->quant_sindex[i] = s->quant_index[index];
1687  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1688  s->h_scount[i] = s->h_count[index];
1689  s->v_scount[i] = s->v_count[index];
1690 
1691  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1692  index = (index+2)%3;
1693 
1694  s->comp_index[i] = index;
1695 
1696  s->dc_index[i] = get_bits(&s->gb, 4);
1697  s->ac_index[i] = get_bits(&s->gb, 4);
1698 
1699  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1700  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1701  goto out_of_range;
1702  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1703  goto out_of_range;
1704  }
1705 
1706  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1707  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1708  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1709  prev_shift = get_bits(&s->gb, 4); /* Ah */
1710  point_transform = get_bits(&s->gb, 4); /* Al */
1711  }else
1712  prev_shift = point_transform = 0;
1713 
1714  if (nb_components > 1) {
1715  /* interleaved stream */
1716  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1717  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1718  } else if (!s->ls) { /* skip this for JPEG-LS */
1719  h = s->h_max / s->h_scount[0];
1720  v = s->v_max / s->v_scount[0];
1721  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1722  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1723  s->nb_blocks[0] = 1;
1724  s->h_scount[0] = 1;
1725  s->v_scount[0] = 1;
1726  }
1727 
1728  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1729  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1730  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1731  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1732  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1733 
1734 
1735  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1736  for (i = s->mjpb_skiptosod; i > 0; i--)
1737  skip_bits(&s->gb, 8);
1738 
1739 next_field:
1740  for (i = 0; i < nb_components; i++)
1741  s->last_dc[i] = (4 << s->bits);
1742 
1743  if (s->avctx->hwaccel) {
1744  int bytes_to_start = get_bits_count(&s->gb) / 8;
1745  av_assert0(bytes_to_start >= 0 &&
1746  s->raw_scan_buffer_size >= bytes_to_start);
1747 
1748  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1749  s->raw_scan_buffer + bytes_to_start,
1750  s->raw_scan_buffer_size - bytes_to_start);
1751  if (ret < 0)
1752  return ret;
1753 
1754  } else if (s->lossless) {
1755  av_assert0(s->picture_ptr == s->picture);
1756  if (CONFIG_JPEGLS_DECODER && s->ls) {
1757 // for () {
1758 // reset_ls_coding_parameters(s, 0);
1759 
 1760  if ((ret = ff_jpegls_decode_picture(s, predictor,
 1761  point_transform, ilv)) < 0)
1762  return ret;
1763  } else {
1764  if (s->rgb || s->bayer) {
1765  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1766  return ret;
1767  } else {
 1768  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
 1769  point_transform,
1770  nb_components)) < 0)
1771  return ret;
1772  }
1773  }
1774  } else {
1775  if (s->progressive && predictor) {
1776  av_assert0(s->picture_ptr == s->picture);
 1777  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
 1778  ilv, prev_shift,
1779  point_transform)) < 0)
1780  return ret;
1781  } else {
1782  if ((ret = mjpeg_decode_scan(s, nb_components,
1783  prev_shift, point_transform,
1784  mb_bitmask, mb_bitmask_size, reference)) < 0)
1785  return ret;
1786  }
1787  }
1788 
1789  if (s->interlaced &&
1790  get_bits_left(&s->gb) > 32 &&
1791  show_bits(&s->gb, 8) == 0xFF) {
1792  GetBitContext bak = s->gb;
1793  align_get_bits(&bak);
1794  if (show_bits(&bak, 16) == 0xFFD1) {
1795  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1796  s->gb = bak;
1797  skip_bits(&s->gb, 16);
1798  s->bottom_field ^= 1;
1799 
1800  goto next_field;
1801  }
1802  }
1803 
1804  emms_c();
1805  return 0;
1806  out_of_range:
1807  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1808  return AVERROR_INVALIDDATA;
1809 }
1810 
 1811 static int mjpeg_decode_dri(MJpegDecodeContext *s)
 1812 {
1813  if (get_bits(&s->gb, 16) != 4)
1814  return AVERROR_INVALIDDATA;
1815  s->restart_interval = get_bits(&s->gb, 16);
1816  s->restart_count = 0;
1817  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1818  s->restart_interval);
1819 
1820  return 0;
1821 }
1822 
1823 static int mjpeg_decode_app(MJpegDecodeContext *s)
1824 {
1825  int len, id, i;
1826 
1827  len = get_bits(&s->gb, 16);
1828  if (len < 6) {
1829  if (s->bayer) {
1830  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1831  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1832  skip_bits(&s->gb, len);
1833  return 0;
1834  } else
1835  return AVERROR_INVALIDDATA;
1836  }
1837  if (8 * len > get_bits_left(&s->gb))
1838  return AVERROR_INVALIDDATA;
1839 
1840  id = get_bits_long(&s->gb, 32);
1841  len -= 6;
1842 
1843  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1844  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1845  av_fourcc2str(av_bswap32(id)), id, len);
1846 
1847  /* Buggy AVID, it puts EOI only at every 10th frame. */
1848  /* This fourcc is also used by non-AVID files; it holds some
1849  information, but it is always present in AVID-created files. */
1850  if (id == AV_RB32("AVI1")) {
1851  /* structure:
1852  4bytes AVI1
1853  1bytes polarity
1854  1bytes always zero
1855  4bytes field_size
1856  4bytes field_size_less_padding
1857  */
1858  s->buggy_avid = 1;
1859  i = get_bits(&s->gb, 8); len--;
1860  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1861  goto out;
1862  }
1863 
1864  if (id == AV_RB32("JFIF")) {
1865  int t_w, t_h, v1, v2;
1866  if (len < 8)
1867  goto out;
1868  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1869  v1 = get_bits(&s->gb, 8);
1870  v2 = get_bits(&s->gb, 8);
1871  skip_bits(&s->gb, 8);
1872 
1873  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1874  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1875  if ( s->avctx->sample_aspect_ratio.num <= 0
1876  || s->avctx->sample_aspect_ratio.den <= 0) {
1877  s->avctx->sample_aspect_ratio.num = 0;
1878  s->avctx->sample_aspect_ratio.den = 1;
1879  }
1880 
1881  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1882  av_log(s->avctx, AV_LOG_INFO,
1883  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1884  v1, v2,
1885  s->avctx->sample_aspect_ratio.num,
1886  s->avctx->sample_aspect_ratio.den);
1887 
1888  len -= 8;
1889  if (len >= 2) {
1890  t_w = get_bits(&s->gb, 8);
1891  t_h = get_bits(&s->gb, 8);
1892  if (t_w && t_h) {
1893  /* skip thumbnail */
1894  if (len -10 - (t_w * t_h * 3) > 0)
1895  len -= t_w * t_h * 3;
1896  }
1897  len -= 2;
1898  }
1899  goto out;
1900  }
1901 
1902  if ( id == AV_RB32("Adob")
1903  && len >= 7
1904  && show_bits(&s->gb, 8) == 'e'
1905  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1906  skip_bits(&s->gb, 8); /* 'e' */
1907  skip_bits(&s->gb, 16); /* version */
1908  skip_bits(&s->gb, 16); /* flags0 */
1909  skip_bits(&s->gb, 16); /* flags1 */
1910  s->adobe_transform = get_bits(&s->gb, 8);
1911  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1912  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1913  len -= 7;
1914  goto out;
1915  }
1916 
1917  if (id == AV_RB32("LJIF")) {
1918  int rgb = s->rgb;
1919  int pegasus_rct = s->pegasus_rct;
1920  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1921  av_log(s->avctx, AV_LOG_INFO,
1922  "Pegasus lossless jpeg header found\n");
1923  skip_bits(&s->gb, 16); /* version ? */
1924  skip_bits(&s->gb, 16); /* unknown always 0? */
1925  skip_bits(&s->gb, 16); /* unknown always 0? */
1926  skip_bits(&s->gb, 16); /* unknown always 0? */
1927  switch (i=get_bits(&s->gb, 8)) {
1928  case 1:
1929  rgb = 1;
1930  pegasus_rct = 0;
1931  break;
1932  case 2:
1933  rgb = 1;
1934  pegasus_rct = 1;
1935  break;
1936  default:
1937  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1938  }
1939 
1940  len -= 9;
1941  if (s->got_picture)
1942  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1943  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1944  goto out;
1945  }
1946 
1947  s->rgb = rgb;
1948  s->pegasus_rct = pegasus_rct;
1949 
1950  goto out;
1951  }
1952  if (id == AV_RL32("colr") && len > 0) {
1953  s->colr = get_bits(&s->gb, 8);
1954  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1955  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1956  len --;
1957  goto out;
1958  }
1959  if (id == AV_RL32("xfrm") && len > 0) {
1960  s->xfrm = get_bits(&s->gb, 8);
1961  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1962  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1963  len --;
1964  goto out;
1965  }
1966 
1967  /* JPS extension by VRex */
1968  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1969  int flags, layout, type;
1970  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1971  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1972 
1973  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1974  skip_bits(&s->gb, 16); len -= 2; /* block length */
1975  skip_bits(&s->gb, 8); /* reserved */
1976  flags = get_bits(&s->gb, 8);
1977  layout = get_bits(&s->gb, 8);
1978  type = get_bits(&s->gb, 8);
1979  len -= 4;
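    /* _JPS stereoscopic descriptor: type 0 marks a 2D image, type 1 a stereo
     * pair whose layout byte selects line-interleaved, side-by-side or
     * over/under packing.  The 0x04 flag bit appears to signal
     * left-view-first ordering; when it is absent the pair is exported with
     * AV_STEREO3D_FLAG_INVERT. */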
1980 
1981  av_freep(&s->stereo3d);
1982  s->stereo3d = av_stereo3d_alloc();
1983  if (!s->stereo3d) {
1984  goto out;
1985  }
1986  if (type == 0) {
1987  s->stereo3d->type = AV_STEREO3D_2D;
1988  } else if (type == 1) {
1989  switch (layout) {
1990  case 0x01:
1991  s->stereo3d->type = AV_STEREO3D_LINES;
1992  break;
1993  case 0x02:
1994  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1995  break;
1996  case 0x03:
1997  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1998  break;
1999  }
2000  if (!(flags & 0x04)) {
2001  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2002  }
2003  }
2004  goto out;
2005  }
2006 
2007  /* EXIF metadata */
2008  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2009  GetByteContext gbytes;
2010  int ret, le, ifd_offset, bytes_read;
2011  const uint8_t *aligned;
2012 
2013  skip_bits(&s->gb, 16); // skip padding
2014  len -= 2;
2015 
2016  // init byte wise reading
2017  aligned = align_get_bits(&s->gb);
2018  bytestream2_init(&gbytes, aligned, len);
2019 
2020  // read TIFF header
2021  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2022  if (ret) {
2023  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2024  } else {
2025  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2026 
2027  // read 0th IFD and store the metadata
2028  // (return values > 0 indicate the presence of subimage metadata)
2029  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2030  if (ret < 0) {
2031  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2032  }
2033  }
2034 
2035  bytes_read = bytestream2_tell(&gbytes);
2036  skip_bits(&s->gb, bytes_read << 3);
2037  len -= bytes_read;
2038 
2039  goto out;
2040  }
2041 
2042  /* Apple MJPEG-A */
2043  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2044  id = get_bits_long(&s->gb, 32);
2045  len -= 4;
2046  /* Apple MJPEG-A */
2047  if (id == AV_RB32("mjpg")) {
2048  /* structure:
2049  4bytes field size
2050  4bytes pad field size
2051  4bytes next off
2052  4bytes quant off
2053  4bytes huff off
2054  4bytes image off
2055  4bytes scan off
2056  4bytes data off
2057  */
2058  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2059  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2060  }
2061  }
2062 
2063  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2064  int id2;
2065  unsigned seqno;
2066  unsigned nummarkers;
2067 
2068  id = get_bits_long(&s->gb, 32);
2069  id2 = get_bits(&s->gb, 24);
2070  len -= 7;
2071  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2072  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2073  goto out;
2074  }
2075 
2076  skip_bits(&s->gb, 8);
2077  seqno = get_bits(&s->gb, 8);
2078  len -= 2;
2079  if (seqno == 0) {
2080  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2081  goto out;
2082  }
2083 
2084  nummarkers = get_bits(&s->gb, 8);
2085  len -= 1;
2086  if (nummarkers == 0) {
2087  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2088  goto out;
2089  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2090  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2091  goto out;
2092  } else if (seqno > nummarkers) {
2093  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2094  goto out;
2095  }
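    /* An ICC profile larger than one marker segment (~64 kB) is split across
     * several APP2 chunks; each chunk carries a 1-based sequence number and
     * the total chunk count so the parts can be reassembled in order once
     * all of them have been read. */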
2096 
2097  /* Allocate if this is the first APP2 we've seen. */
2098  if (s->iccnum == 0) {
2099  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2100  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2101  return AVERROR(ENOMEM);
2102  }
2103  s->iccnum = nummarkers;
2104  }
2105 
2106  if (s->iccentries[seqno - 1].data) {
2107  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2108  goto out;
2109  }
2110 
2111  s->iccentries[seqno - 1].length = len;
2112  s->iccentries[seqno - 1].data = av_malloc(len);
2113  if (!s->iccentries[seqno - 1].data) {
2114  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2115  return AVERROR(ENOMEM);
2116  }
2117 
2118  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2119  skip_bits(&s->gb, len << 3);
2120  len = 0;
2121  s->iccread++;
2122 
2123  if (s->iccread > s->iccnum)
2124  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2125  }
2126 
2127 out:
2128  /* slow but needed for extreme adobe jpegs */
2129  if (len < 0)
2130  av_log(s->avctx, AV_LOG_ERROR,
2131  "mjpeg: error, decode_app parser read over the end\n");
2132  while (--len > 0)
2133  skip_bits(&s->gb, 8);
2134 
2135  return 0;
2136 }
2137 
2138 static int mjpeg_decode_com(MJpegDecodeContext *s)
2139 {
2140  int len = get_bits(&s->gb, 16);
2141  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2142  int i;
2143  char *cbuf = av_malloc(len - 1);
2144  if (!cbuf)
2145  return AVERROR(ENOMEM);
2146 
2147  for (i = 0; i < len - 2; i++)
2148  cbuf[i] = get_bits(&s->gb, 8);
2149  if (i > 0 && cbuf[i - 1] == '\n')
2150  cbuf[i - 1] = 0;
2151  else
2152  cbuf[i] = 0;
2153 
2154  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2155  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2156 
2157  /* buggy avid, it puts EOI only at every 10th frame */
2158  if (!strncmp(cbuf, "AVID", 4)) {
2159  parse_avid(s, cbuf, len);
2160  } else if (!strcmp(cbuf, "CS=ITU601"))
2161  s->cs_itu601 = 1;
2162  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2163  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2164  s->flipped = 1;
2165  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2166  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2167  s->multiscope = 2;
2168  }
2169 
2170  av_free(cbuf);
2171  }
2172 
2173  return 0;
2174 }
2175 
2176 /* return the 8 bit start code value and update the search
2177  state. Return -1 if no start code found */
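/* A marker is an 0xFF byte followed by a code in the SOF0..COM range; stuffed
   0xFF 0x00 pairs and 0xFF fill bytes inside entropy-coded data fall outside
   that range and are skipped here. */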
2178 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2179 {
2180  const uint8_t *buf_ptr;
2181  unsigned int v, v2;
2182  int val;
2183  int skipped = 0;
2184 
2185  buf_ptr = *pbuf_ptr;
2186  while (buf_end - buf_ptr > 1) {
2187  v = *buf_ptr++;
2188  v2 = *buf_ptr;
2189  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2190  val = *buf_ptr++;
2191  goto found;
2192  }
2193  skipped++;
2194  }
2195  buf_ptr = buf_end;
2196  val = -1;
2197 found:
2198  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2199  *pbuf_ptr = buf_ptr;
2200  return val;
2201 }
2202 
2203 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2204  const uint8_t **buf_ptr, const uint8_t *buf_end,
2205  const uint8_t **unescaped_buf_ptr,
2206  int *unescaped_buf_size)
2207 {
2208  int start_code;
2209  start_code = find_marker(buf_ptr, buf_end);
2210 
2211  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2212  if (!s->buffer)
2213  return AVERROR(ENOMEM);
2214 
2215  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2216  if (start_code == SOS && !s->ls) {
2217  const uint8_t *src = *buf_ptr;
2218  const uint8_t *ptr = src;
2219  uint8_t *dst = s->buffer;
2220 
2221  #define copy_data_segment(skip) do { \
2222  ptrdiff_t length = (ptr - src) - (skip); \
2223  if (length > 0) { \
2224  memcpy(dst, src, length); \
2225  dst += length; \
2226  src = ptr; \
2227  } \
2228  } while (0)
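    /* Baseline JPEG escapes any 0xFF inside entropy-coded data by appending a
     * stuffed 0x00 (so 0xFF 0x00 decodes to a plain 0xFF byte), while runs of
     * 0xFF are fill bytes preceding a marker.  The loop below copies the
     * payload with the stuffing removed and stops at the first marker that is
     * not a restart marker (RST0..RST7). */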
2229 
2230  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2231  ptr = buf_end;
2232  copy_data_segment(0);
2233  } else {
2234  while (ptr < buf_end) {
2235  uint8_t x = *(ptr++);
2236 
2237  if (x == 0xff) {
2238  ptrdiff_t skip = 0;
2239  while (ptr < buf_end && x == 0xff) {
2240  x = *(ptr++);
2241  skip++;
2242  }
2243 
2244  /* 0xFF, 0xFF, ... */
2245  if (skip > 1) {
2246  copy_data_segment(skip);
2247 
2248  /* decrement src as it is equal to ptr after the
2249  * copy_data_segment macro and we might want to
2250  * copy the current value of x later on */
2251  src--;
2252  }
2253 
2254  if (x < RST0 || x > RST7) {
2255  copy_data_segment(1);
2256  if (x)
2257  break;
2258  }
2259  }
2260  }
2261  if (src < ptr)
2262  copy_data_segment(0);
2263  }
2264  #undef copy_data_segment
2265 
2266  *unescaped_buf_ptr = s->buffer;
2267  *unescaped_buf_size = dst - s->buffer;
2268  memset(s->buffer + *unescaped_buf_size, 0,
2269  s->buffer_size - *unescaped_buf_size);
2270 
2271  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2272  (buf_end - *buf_ptr) - (dst - s->buffer));
2273  } else if (start_code == SOS && s->ls) {
2274  const uint8_t *src = *buf_ptr;
2275  uint8_t *dst = s->buffer;
2276  int bit_count = 0;
2277  int t = 0, b = 0;
2278  PutBitContext pb;
2279 
2280  /* find marker */
2281  while (src + t < buf_end) {
2282  uint8_t x = src[t++];
2283  if (x == 0xff) {
2284  while ((src + t < buf_end) && x == 0xff)
2285  x = src[t++];
2286  if (x & 0x80) {
2287  t -= FFMIN(2, t);
2288  break;
2289  }
2290  }
2291  }
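    /* JPEG-LS uses bit stuffing instead of byte stuffing: after an 0xFF byte
     * the encoder inserts a single 0 bit, so the byte that follows carries
     * only 7 payload bits.  The rewrite below therefore emits 8 bits for
     * ordinary bytes but only 7 for a byte following 0xFF, shrinking
     * bit_count accordingly. */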
2292  bit_count = t * 8;
2293  init_put_bits(&pb, dst, t);
2294 
2295  /* unescape bitstream */
2296  while (b < t) {
2297  uint8_t x = src[b++];
2298  put_bits(&pb, 8, x);
2299  if (x == 0xFF && b < t) {
2300  x = src[b++];
2301  if (x & 0x80) {
2302  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2303  x &= 0x7f;
2304  }
2305  put_bits(&pb, 7, x);
2306  bit_count--;
2307  }
2308  }
2309  flush_put_bits(&pb);
2310 
2311  *unescaped_buf_ptr = dst;
2312  *unescaped_buf_size = (bit_count + 7) >> 3;
2313  memset(s->buffer + *unescaped_buf_size, 0,
2314  s->buffer_size - *unescaped_buf_size);
2315  } else {
2316  *unescaped_buf_ptr = *buf_ptr;
2317  *unescaped_buf_size = buf_end - *buf_ptr;
2318  }
2319 
2320  return start_code;
2321 }
2322 
2323 static void reset_icc_profile(MJpegDecodeContext *s)
2324 {
2325  int i;
2326 
2327  if (s->iccentries) {
2328  for (i = 0; i < s->iccnum; i++)
2329  av_freep(&s->iccentries[i].data);
2330  av_freep(&s->iccentries);
2331  }
2332 
2333  s->iccread = 0;
2334  s->iccnum = 0;
2335 }
2336 
2337 // SMV JPEG just stacks several output frames into one JPEG picture
2338 // we handle that by setting up the cropping parameters appropriately
2339 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2340 {
2341  MJpegDecodeContext *s = avctx->priv_data;
2342  int ret;
2343 
2344  if (s->smv_next_frame > 0) {
2345  av_assert0(s->smv_frame->buf[0]);
2346  av_frame_unref(frame);
2347  ret = av_frame_ref(frame, s->smv_frame);
2348  if (ret < 0)
2349  return ret;
2350  } else {
2351  av_assert0(frame->buf[0]);
2352  av_frame_unref(s->smv_frame);
2353  ret = av_frame_ref(s->smv_frame, frame);
2354  if (ret < 0)
2355  return ret;
2356  }
2357 
2358  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2359 
2360  frame->width = avctx->coded_width;
2361  frame->height = avctx->coded_height;
2362  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2363  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
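    /* Example: with smv_frames_per_jpeg = 4 and avctx->height = 240 the coded
     * picture is 960 pixels tall; sub-frame 1 is exposed by cropping 240
     * pixels at the top and 480 at the bottom. */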
2364 
2365  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2366 
2367  if (s->smv_next_frame == 0)
2368  av_frame_unref(s->smv_frame);
2369 
2370  return 0;
2371 }
2372 
2373 static int mjpeg_get_packet(AVCodecContext *avctx)
2374 {
2375  MJpegDecodeContext *s = avctx->priv_data;
2376  int ret;
2377 
2378  av_packet_unref(s->pkt);
2379  ret = ff_decode_get_packet(avctx, s->pkt);
2380  if (ret < 0)
2381  return ret;
2382 
2383 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2384  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2385  avctx->codec_id == AV_CODEC_ID_AMV) {
2386  ret = ff_sp5x_process_packet(avctx, s->pkt);
2387  if (ret < 0)
2388  return ret;
2389  }
2390 #endif
2391 
2392  s->buf_size = s->pkt->size;
2393 
2394  return 0;
2395 }
2396 
2397 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2398 {
2399  MJpegDecodeContext *s = avctx->priv_data;
2400  const uint8_t *buf_end, *buf_ptr;
2401  const uint8_t *unescaped_buf_ptr;
2402  int hshift, vshift;
2403  int unescaped_buf_size;
2404  int start_code;
2405  int i, index;
2406  int ret = 0;
2407  int is16bit;
2408  AVDictionaryEntry *e = NULL;
2409 
2410  s->force_pal8 = 0;
2411 
2412  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2413  return smv_process_frame(avctx, frame);
2414 
2415  av_dict_free(&s->exif_metadata);
2416  av_freep(&s->stereo3d);
2417  s->adobe_transform = -1;
2418 
2419  if (s->iccnum != 0)
2420  reset_icc_profile(s);
2421 
2422  ret = mjpeg_get_packet(avctx);
2423  if (ret < 0)
2424  return ret;
2425 redo_for_pal8:
2426  buf_ptr = s->pkt->data;
2427  buf_end = s->pkt->data + s->pkt->size;
2428  while (buf_ptr < buf_end) {
2429  /* find start next marker */
2430  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2431  &unescaped_buf_ptr,
2432  &unescaped_buf_size);
2433  /* EOF */
2434  if (start_code < 0) {
2435  break;
2436  } else if (unescaped_buf_size > INT_MAX / 8) {
2437  av_log(avctx, AV_LOG_ERROR,
2438  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2439  start_code, unescaped_buf_size, s->pkt->size);
2440  return AVERROR_INVALIDDATA;
2441  }
2442  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2443  start_code, buf_end - buf_ptr);
2444 
2445  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2446 
2447  if (ret < 0) {
2448  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2449  goto fail;
2450  }
2451 
2452  s->start_code = start_code;
2453  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2454  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2455 
2456  /* process markers */
2457  if (start_code >= RST0 && start_code <= RST7) {
2458  av_log(avctx, AV_LOG_DEBUG,
2459  "restart marker: %d\n", start_code & 0x0f);
2460  /* APP fields */
2461  } else if (start_code >= APP0 && start_code <= APP15) {
2462  if ((ret = mjpeg_decode_app(s)) < 0)
2463  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2464  av_err2str(ret));
2465  /* Comment */
2466  } else if (start_code == COM) {
2467  ret = mjpeg_decode_com(s);
2468  if (ret < 0)
2469  return ret;
2470  } else if (start_code == DQT) {
2471  ret = ff_mjpeg_decode_dqt(s);
2472  if (ret < 0)
2473  return ret;
2474  }
2475 
2476  ret = -1;
2477 
2478  if (!CONFIG_JPEGLS_DECODER &&
2479  (start_code == SOF48 || start_code == LSE)) {
2480  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2481  return AVERROR(ENOSYS);
2482  }
2483 
2484  if (avctx->skip_frame == AVDISCARD_ALL) {
2485  switch(start_code) {
2486  case SOF0:
2487  case SOF1:
2488  case SOF2:
2489  case SOF3:
2490  case SOF48:
2491  case SOI:
2492  case SOS:
2493  case EOI:
2494  break;
2495  default:
2496  goto skip;
2497  }
2498  }
2499 
2500  switch (start_code) {
2501  case SOI:
2502  s->restart_interval = 0;
2503  s->restart_count = 0;
2504  s->raw_image_buffer = buf_ptr;
2505  s->raw_image_buffer_size = buf_end - buf_ptr;
2506  /* nothing to do on SOI */
2507  break;
2508  case DHT:
2509  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2510  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2511  goto fail;
2512  }
2513  break;
2514  case SOF0:
2515  case SOF1:
2516  if (start_code == SOF0)
2517  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2518  else
2519  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2520  s->lossless = 0;
2521  s->ls = 0;
2522  s->progressive = 0;
2523  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2524  goto fail;
2525  break;
2526  case SOF2:
2527  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2528  s->lossless = 0;
2529  s->ls = 0;
2530  s->progressive = 1;
2531  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2532  goto fail;
2533  break;
2534  case SOF3:
2535  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2536  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2537  s->lossless = 1;
2538  s->ls = 0;
2539  s->progressive = 0;
2540  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2541  goto fail;
2542  break;
2543  case SOF48:
2544  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2545  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2546  s->lossless = 1;
2547  s->ls = 1;
2548  s->progressive = 0;
2549  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2550  goto fail;
2551  break;
2552  case LSE:
2553  if (!CONFIG_JPEGLS_DECODER ||
2554  (ret = ff_jpegls_decode_lse(s)) < 0)
2555  goto fail;
2556  if (ret == 1)
2557  goto redo_for_pal8;
2558  break;
2559  case EOI:
2560 eoi_parser:
2561  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2562  s->progressive && s->cur_scan && s->got_picture)
2563  mjpeg_idct_scan_progressive_ac(s);
2564  s->cur_scan = 0;
2565  if (!s->got_picture) {
2566  av_log(avctx, AV_LOG_WARNING,
2567  "Found EOI before any SOF, ignoring\n");
2568  break;
2569  }
2570  if (s->interlaced) {
2571  s->bottom_field ^= 1;
2572  /* if not bottom field, do not output image yet */
2573  if (s->bottom_field == !s->interlace_polarity)
2574  break;
2575  }
2576  if (avctx->skip_frame == AVDISCARD_ALL) {
2577  s->got_picture = 0;
2578  ret = AVERROR(EAGAIN);
2579  goto the_end_no_picture;
2580  }
2581  if (s->avctx->hwaccel) {
2582  ret = s->avctx->hwaccel->end_frame(s->avctx);
2583  if (ret < 0)
2584  return ret;
2585 
2586  av_freep(&s->hwaccel_picture_private);
2587  }
2588  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2589  return ret;
2590  s->got_picture = 0;
2591 
2592  frame->pkt_dts = s->pkt->dts;
2593 
2594  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2595  int qp = FFMAX3(s->qscale[0],
2596  s->qscale[1],
2597  s->qscale[2]);
2598 
2599  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2600  }
2601 
2602  goto the_end;
2603  case SOS:
2604  s->raw_scan_buffer = buf_ptr;
2605  s->raw_scan_buffer_size = buf_end - buf_ptr;
2606 
2607  s->cur_scan++;
2608  if (avctx->skip_frame == AVDISCARD_ALL) {
2609  skip_bits(&s->gb, get_bits_left(&s->gb));
2610  break;
2611  }
2612 
2613  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2614  (avctx->err_recognition & AV_EF_EXPLODE))
2615  goto fail;
2616  break;
2617  case DRI:
2618  if ((ret = mjpeg_decode_dri(s)) < 0)
2619  return ret;
2620  break;
2621  case SOF5:
2622  case SOF6:
2623  case SOF7:
2624  case SOF9:
2625  case SOF10:
2626  case SOF11:
2627  case SOF13:
2628  case SOF14:
2629  case SOF15:
2630  case JPG:
2631  av_log(avctx, AV_LOG_ERROR,
2632  "mjpeg: unsupported coding type (%x)\n", start_code);
2633  break;
2634  }
2635 
2636 skip:
2637  /* eof process start code */
2638  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2639  av_log(avctx, AV_LOG_DEBUG,
2640  "marker parser used %d bytes (%d bits)\n",
2641  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2642  }
2643  if (s->got_picture && s->cur_scan) {
2644  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2645  goto eoi_parser;
2646  }
2647  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2648  return AVERROR_INVALIDDATA;
2649 fail:
2650  s->got_picture = 0;
2651  return ret;
2652 the_end:
2653 
2654  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2655 
2656  if (AV_RB32(s->upscale_h)) {
2657  int p;
2658  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2659  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2660  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2661  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2662  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2663  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2664  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2665  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2666  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2667  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2668  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2669  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2670  );
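    /* upscale_h[p] == 1 means the plane was decoded at half the target width,
     * == 2 at a third of it; the loops below stretch each line in place,
     * walking right to left so narrower source samples are not overwritten
     * before they are read. */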
2671  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2672  if (ret)
2673  return ret;
2674 
2675  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2676  for (p = 0; p<s->nb_components; p++) {
2677  uint8_t *line = s->picture_ptr->data[p];
2678  int w = s->width;
2679  int h = s->height;
2680  if (!s->upscale_h[p])
2681  continue;
2682  if (p==1 || p==2) {
2683  w = AV_CEIL_RSHIFT(w, hshift);
2684  h = AV_CEIL_RSHIFT(h, vshift);
2685  }
2686  if (s->upscale_v[p] == 1)
2687  h = (h+1)>>1;
2688  av_assert0(w > 0);
2689  for (i = 0; i < h; i++) {
2690  if (s->upscale_h[p] == 1) {
2691  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2692  else line[w - 1] = line[(w - 1) / 2];
2693  for (index = w - 2; index > 0; index--) {
2694  if (is16bit)
2695  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2696  else
2697  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2698  }
2699  } else if (s->upscale_h[p] == 2) {
2700  if (is16bit) {
2701  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2702  if (w > 1)
2703  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2704  } else {
2705  line[w - 1] = line[(w - 1) / 3];
2706  if (w > 1)
2707  line[w - 2] = line[w - 1];
2708  }
2709  for (index = w - 3; index > 0; index--) {
2710  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2711  }
2712  }
2713  line += s->linesize[p];
2714  }
2715  }
2716  }
2717  if (AV_RB32(s->upscale_v)) {
2718  int p;
2719  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2720  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2721  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2722  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2723  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2724  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2725  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2726  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2727  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2728  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2729  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2730  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2731  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2732  );
2733  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2734  if (ret)
2735  return ret;
2736 
2737  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2738  for (p = 0; p < s->nb_components; p++) {
2739  uint8_t *dst;
2740  int w = s->width;
2741  int h = s->height;
2742  if (!s->upscale_v[p])
2743  continue;
2744  if (p==1 || p==2) {
2745  w = AV_CEIL_RSHIFT(w, hshift);
2746  h = AV_CEIL_RSHIFT(h, vshift);
2747  }
2748  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2749  for (i = h - 1; i; i--) {
2750  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2751  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2752  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2753  memcpy(dst, src1, w);
2754  } else {
2755  for (index = 0; index < w; index++)
2756  dst[index] = (src1[index] + src2[index]) >> 1;
2757  }
2758  dst -= s->linesize[p];
2759  }
2760  }
2761  }
2762  if (s->flipped && !s->rgb) {
2763  int j;
2764  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2765  if (ret)
2766  return ret;
2767 
2768  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2769  for (index=0; index<s->nb_components; index++) {
2770  uint8_t *dst = s->picture_ptr->data[index];
2771  int w = s->picture_ptr->width;
2772  int h = s->picture_ptr->height;
2773  if(index && index<3){
2774  w = AV_CEIL_RSHIFT(w, hshift);
2775  h = AV_CEIL_RSHIFT(h, vshift);
2776  }
2777  if(dst){
2778  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2779  for (i=0; i<h/2; i++) {
2780  for (j=0; j<w; j++)
2781  FFSWAP(int, dst[j], dst2[j]);
2782  dst += s->picture_ptr->linesize[index];
2783  dst2 -= s->picture_ptr->linesize[index];
2784  }
2785  }
2786  }
2787  }
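    /* With Adobe transform 0 and four components the data appears to be
     * CMYK-style: the block below scales each colour plane by the K plane
     * (plane 3), rotates the results into GBRAP plane order and forces the
     * alpha plane to opaque. */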
2788  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2789  int w = s->picture_ptr->width;
2790  int h = s->picture_ptr->height;
2791  av_assert0(s->nb_components == 4);
2792  for (i=0; i<h; i++) {
2793  int j;
2794  uint8_t *dst[4];
2795  for (index=0; index<4; index++) {
2796  dst[index] = s->picture_ptr->data[index]
2797  + s->picture_ptr->linesize[index]*i;
2798  }
2799  for (j=0; j<w; j++) {
2800  int k = dst[3][j];
2801  int r = dst[0][j] * k;
2802  int g = dst[1][j] * k;
2803  int b = dst[2][j] * k;
2804  dst[0][j] = g*257 >> 16;
2805  dst[1][j] = b*257 >> 16;
2806  dst[2][j] = r*257 >> 16;
2807  dst[3][j] = 255;
2808  }
2809  }
2810  }
2811  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2812  int w = s->picture_ptr->width;
2813  int h = s->picture_ptr->height;
2814  av_assert0(s->nb_components == 4);
2815  for (i=0; i<h; i++) {
2816  int j;
2817  uint8_t *dst[4];
2818  for (index=0; index<4; index++) {
2819  dst[index] = s->picture_ptr->data[index]
2820  + s->picture_ptr->linesize[index]*i;
2821  }
2822  for (j=0; j<w; j++) {
2823  int k = dst[3][j];
2824  int r = (255 - dst[0][j]) * k;
2825  int g = (128 - dst[1][j]) * k;
2826  int b = (128 - dst[2][j]) * k;
2827  dst[0][j] = r*257 >> 16;
2828  dst[1][j] = (g*257 >> 16) + 128;
2829  dst[2][j] = (b*257 >> 16) + 128;
2830  dst[3][j] = 255;
2831  }
2832  }
2833  }
2834 
2835  if (s->stereo3d) {
2836  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2837  if (stereo) {
2838  stereo->type = s->stereo3d->type;
2839  stereo->flags = s->stereo3d->flags;
2840  }
2841  av_freep(&s->stereo3d);
2842  }
2843 
2844  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2845  AVFrameSideData *sd;
2846  size_t offset = 0;
2847  int total_size = 0;
2848  int i;
2849 
2850  /* Sum size of all parts. */
2851  for (i = 0; i < s->iccnum; i++)
2852  total_size += s->iccentries[i].length;
2853 
2854  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2855  if (!sd) {
2856  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2857  return AVERROR(ENOMEM);
2858  }
2859 
2860  /* Reassemble the parts, which are now in-order. */
2861  for (i = 0; i < s->iccnum; i++) {
2862  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2863  offset += s->iccentries[i].length;
2864  }
2865  }
2866 
2867  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2868  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2869  int orientation = strtol(value, &endptr, 0);
2870 
2871  if (!*endptr) {
2872  AVFrameSideData *sd = NULL;
2873 
2874  if (orientation >= 2 && orientation <= 8) {
2875  int32_t *matrix;
2876 
2877  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2878  if (!sd) {
2879  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2880  return AVERROR(ENOMEM);
2881  }
2882 
2883  matrix = (int32_t *)sd->data;
2884 
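    /* EXIF orientation 1 is upright and needs no matrix; values 2..8 combine
     * a rotation of 0/90/180/270 degrees with an optional flip, which the
     * switch below encodes as a display matrix. */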
2885  switch (orientation) {
2886  case 2:
2887  av_display_rotation_set(matrix, 0.0);
2888  av_display_matrix_flip(matrix, 1, 0);
2889  break;
2890  case 3:
2891  av_display_rotation_set(matrix, 180.0);
2892  break;
2893  case 4:
2894  av_display_rotation_set(matrix, 180.0);
2895  av_display_matrix_flip(matrix, 1, 0);
2896  break;
2897  case 5:
2898  av_display_rotation_set(matrix, 90.0);
2899  av_display_matrix_flip(matrix, 1, 0);
2900  break;
2901  case 6:
2902  av_display_rotation_set(matrix, 90.0);
2903  break;
2904  case 7:
2905  av_display_rotation_set(matrix, -90.0);
2906  av_display_matrix_flip(matrix, 1, 0);
2907  break;
2908  case 8:
2909  av_display_rotation_set(matrix, -90.0);
2910  break;
2911  default:
2912  av_assert0(0);
2913  }
2914  }
2915  }
2916  }
2917 
2918  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2919  av_dict_free(&s->exif_metadata);
2920 
2921  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2922  ret = smv_process_frame(avctx, frame);
2923  if (ret < 0) {
2924  av_frame_unref(frame);
2925  return ret;
2926  }
2927  }
2928  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2929  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2930  avctx->coded_height > s->orig_height) {
2931  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2932  frame->crop_top = frame->height - avctx->height;
2933  }
2934 
2935  ret = 0;
2936 
2937 the_end_no_picture:
2938  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2939  buf_end - buf_ptr);
2940 
2941  return ret;
2942 }
2943 
2944 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2945  * even without having called ff_mjpeg_decode_init(). */
2946 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2947 {
2948  MJpegDecodeContext *s = avctx->priv_data;
2949  int i, j;
2950 
2951  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2952  av_log(avctx, AV_LOG_INFO, "Single field\n");
2953  }
2954 
2955  if (s->picture) {
2956  av_frame_free(&s->picture);
2957  s->picture_ptr = NULL;
2958  } else if (s->picture_ptr)
2959  av_frame_unref(s->picture_ptr);
2960 
2961  av_frame_free(&s->smv_frame);
2962 
2963  av_freep(&s->buffer);
2964  av_freep(&s->stereo3d);
2965  av_freep(&s->ljpeg_buffer);
2966  s->ljpeg_buffer_size = 0;
2967 
2968  for (i = 0; i < 3; i++) {
2969  for (j = 0; j < 4; j++)
2970  ff_free_vlc(&s->vlcs[i][j]);
2971  }
2972  for (i = 0; i < MAX_COMPONENTS; i++) {
2973  av_freep(&s->blocks[i]);
2974  av_freep(&s->last_nnz[i]);
2975  }
2976  av_dict_free(&s->exif_metadata);
2977 
2978  reset_icc_profile(s);
2979 
2980  av_freep(&s->hwaccel_picture_private);
2981  av_freep(&s->jls_state);
2982 
2983  return 0;
2984 }
2985 
2986 static void decode_flush(AVCodecContext *avctx)
2987 {
2988  MJpegDecodeContext *s = avctx->priv_data;
2989  s->got_picture = 0;
2990 
2991  s->smv_next_frame = 0;
2992  av_frame_unref(s->smv_frame);
2993 }
2994 
2995 #if CONFIG_MJPEG_DECODER
2996 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2997 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2998 static const AVOption options[] = {
2999  { "extern_huff", "Use external huffman table.",
3000  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
3001  { NULL },
3002 };
3003 
3004 static const AVClass mjpegdec_class = {
3005  .class_name = "MJPEG decoder",
3006  .item_name = av_default_item_name,
3007  .option = options,
3008  .version = LIBAVUTIL_VERSION_INT,
3009 };
3010 
3011 const AVCodec ff_mjpeg_decoder = {
3012  .name = "mjpeg",
3013  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
3014  .type = AVMEDIA_TYPE_VIDEO,
3015  .id = AV_CODEC_ID_MJPEG,
3016  .priv_data_size = sizeof(MJpegDecodeContext),
3017  .init = ff_mjpeg_decode_init,
3018  .close = ff_mjpeg_decode_end,
3019  .receive_frame = ff_mjpeg_receive_frame,
3020  .flush = decode_flush,
3021  .capabilities = AV_CODEC_CAP_DR1,
3022  .max_lowres = 3,
3023  .priv_class = &mjpegdec_class,
3027  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3028 #if CONFIG_MJPEG_NVDEC_HWACCEL
3029  HWACCEL_NVDEC(mjpeg),
3030 #endif
3031 #if CONFIG_MJPEG_VAAPI_HWACCEL
3032  HWACCEL_VAAPI(mjpeg),
3033 #endif
3034  NULL
3035  },
3036 };
3037 #endif
3038 #if CONFIG_THP_DECODER
3039 const AVCodec ff_thp_decoder = {
3040  .name = "thp",
3041  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
3042  .type = AVMEDIA_TYPE_VIDEO,
3043  .id = AV_CODEC_ID_THP,
3044  .priv_data_size = sizeof(MJpegDecodeContext),
3045  .init = ff_mjpeg_decode_init,
3046  .close = ff_mjpeg_decode_end,
3047  .receive_frame = ff_mjpeg_receive_frame,
3048  .flush = decode_flush,
3049  .capabilities = AV_CODEC_CAP_DR1,
3050  .max_lowres = 3,
3053 };
3054 #endif
3055 
3056 #if CONFIG_SMVJPEG_DECODER
3057 const AVCodec ff_smvjpeg_decoder = {
3058  .name = "smvjpeg",
3059  .long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
3060  .type = AVMEDIA_TYPE_VIDEO,
3061  .id = AV_CODEC_ID_SMVJPEG,
3062  .priv_data_size = sizeof(MJpegDecodeContext),
3063  .init = ff_mjpeg_decode_init,
3064  .close = ff_mjpeg_decode_end,
3065  .receive_frame = ff_mjpeg_receive_frame,
3066  .flush = decode_flush,
3067  .capabilities = AV_CODEC_CAP_DR1,
3070 };
3071 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:424
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1359
AVCodec
AVCodec.
Definition: codec.h:202
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:292
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:224
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:57
mjpeg.h
level
uint8_t level
Definition: svq3.c:204
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:603
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:960
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1089
out
FILE * out
Definition: movenc.c:54
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1391
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:605
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2986
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2660
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:953
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1324
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:707
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:547
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:61
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:275
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:109
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:192
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:220
index
fg index
Definition: ffmpeg_filter.c:167
AVFrame::width
int width
Definition: frame.h:389
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:446
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:597
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1629
smv_process_frame
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2339
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
internal.h
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:989
AVOption
AVOption.
Definition: opt.h:247
b
#define b
Definition: input.c:40
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:787
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:143
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:68
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:798
mjpeg_get_packet
static int mjpeg_get_packet(AVCodecContext *avctx)
Definition: mjpegdec.c:2373
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:179
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:150
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1303
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
init
static int init
Definition: av_tx.c:47
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:216
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2700
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:529
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
rgb
Definition: rpzaenc.c:59
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:237
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1237
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1407
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:118
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1673
fail
#define fail()
Definition: checkasm.h:127
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:448
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1625
GetBitContext
Definition: get_bits.h:62
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2138
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:55
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2688
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:571
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:388
ff_sp5x_process_packet
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: sp5xdec.c:33
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:62
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:97
av_bswap32
#define av_bswap32
Definition: bswap.h:33
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:248
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:115
aligned
static int aligned(int val)
Definition: dashdec.c:169
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:854
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:416
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1823
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1627
mask
static const uint16_t mask[17]
Definition: lzw.c:38
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1036
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:150
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:98
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:417
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1628
g
const char * g
Definition: vf_curves.c:117
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:361
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:159
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:415
ff_thp_decoder
const AVCodec ff_thp_decoder
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2323
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2946
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:49
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
ff_mjpeg_receive_frame
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2397
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:423
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:394
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_CODEC_ID_SP5X
@ AV_CODEC_ID_SP5X
Definition: codec_id.h:60
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:111
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
ff_smvjpeg_decoder
const AVCodec ff_smvjpeg_decoder
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:395
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1598
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:194
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:418
receive_frame
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
Definition: crystalhd.c:559
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
src
#define src
Definition: vp8dsp.c:255
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:200
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1422
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:971
lowres
static int lowres
Definition: ffplay.c:334
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1543
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1335
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:68
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1432
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:508
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1652
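A hedged sketch of the usual call pattern inside a decoder; it only builds inside the FFmpeg tree because decode.h is a private header, and alloc_output() is a hypothetical helper, not part of this file.

#include "libavcodec/avcodec.h"
#include "libavcodec/decode.h"   /* private header: ff_get_buffer() */
#include "libavutil/frame.h"

/* Request a refcounted output buffer for an intra-only frame. */
static int alloc_output(AVCodecContext *avctx, AVFrame *frame)
{
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;
    return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
}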
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1071
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
dc
Excerpt from doc/snow.txt describing the Snow bitstream layout, range coder and intra DC prediction; not specific to the MJPEG decoder.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:325
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:325
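A minimal sketch of handing out an additional reference to a decoded frame without copying pixel data; give_out_reference() is a hypothetical helper, not part of this file.

#include <libavutil/frame.h>

/* Make 'dst' share the buffers of 'decoded'. */
static int give_out_reference(AVFrame *dst, const AVFrame *decoded)
{
    av_frame_unref(dst);               /* drop anything dst still holds */
    return av_frame_ref(dst, decoded); /* add a new reference, no pixel copy */
}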
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:872
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:121
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1633
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:263
AV_RB32
Read an unsigned 32-bit big-endian value from a byte buffer.
Definition: bytestream.h:96
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:225
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:404
AVCodecHWConfigInternal
Definition: hwconfig.h:29
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:139
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1213
offset
Excerpt from doc/writing_filters.txt on the offset field of the option table.
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:322
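For orientation, a standalone illustration (not FFmpeg's implementation) of the sign convention get_xbits() implements, i.e. the EXTEND step used for JPEG/MPEG-1 DC coding: 'size' raw bits follow the Huffman code, and values whose top bit is clear are negative.

/* Map 'size' raw bits following a DC/AC Huffman code to a signed value. */
static int jpeg_extend(unsigned raw_bits, int size)
{
    if (size == 0)
        return 0;
    if (raw_bits < (1u << (size - 1)))                  /* top bit clear -> negative */
        return (int)raw_bits - (int)((1u << size) - 1);
    return (int)raw_bits;
}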
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:164
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2178
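For orientation, a simplified standalone illustration of the scan this performs: a marker is an 0xFF byte followed by a code other than 0x00 (byte stuffing) or 0xFF (fill bytes). The real function additionally tracks how many bytes were skipped; next_marker() below is only a hypothetical sketch.

#include <stdint.h>

/* Return the next marker code, or -1 if none is found before 'end'. */
static int next_marker(const uint8_t **p, const uint8_t *end)
{
    const uint8_t *buf = *p;
    while (buf + 1 < end) {
        if (buf[0] == 0xFF && buf[1] != 0x00 && buf[1] != 0xFF) {
            *p = buf + 2;          /* position just after the marker code */
            return buf[1];
        }
        buf++;
    }
    *p = end;
    return -1;
}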
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
layout
Excerpt from doc/filter_design.txt: the list of supported formats means pixel formats for video and channel layouts for audio.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
src1
#define src1
Definition: h264pred.c:140
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2040
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:805
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
code
Excerpt from doc/filter_design.txt on forwarding output status and returning FFERROR_NOT_READY.
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:447
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: internal.h:50
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1626
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1811
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:144
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:129
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
Definition: utils.c:50
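A hedged sketch of the reuse pattern this helper enables: the buffer pointer and its current size live in some persistent context (plain statics here only for brevity) and are reallocated only when the requested size grows; ensure_scratch() is a hypothetical helper.

#include <libavcodec/avcodec.h>   /* av_fast_padded_malloc() */

static uint8_t *scratch;          /* would normally live in a context struct */
static unsigned scratch_size;

static int ensure_scratch(size_t needed)
{
    av_fast_padded_malloc(&scratch, &scratch_size, needed);
    return scratch ? 0 : AVERROR(ENOMEM);
}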
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
Excerpt from doc/writing_filters.txt on option default values.
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1310
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:435
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:263
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:974
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:556
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:580
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
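A hedged sketch of how a scan-order table like this is typically used when reading a DQT segment: the i-th coded value is stored at natural (row-major) position zigzag[i]. dezigzag_quant() is a hypothetical helper; ff_zigzag_direct would be passed as the table.

#include <stdint.h>

static void dezigzag_quant(uint16_t dst[64], const uint8_t *src,
                           const uint8_t zigzag[64])
{
    for (int i = 0; i < 64; i++)
        dst[zigzag[i]] = src[i];   /* coded order -> natural 8x8 order */
}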
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:945
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:71
frame
Excerpt from doc/filter_design.txt on request_frame handling and frame queuing in filters.
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:694
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1307
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Excerpt from doc/snow.txt on motion vector prediction (duplicate of the excerpt under dc above).
Definition: snow.txt:386
AV_RL32
Read an unsigned 32-bit little-endian value from a byte buffer.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2203
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:157
OFFSET
Excerpt from doc/writing_filters.txt: in the option table, offset is the offset of the option's field in the private context structure; see the OFFSET() macro.
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVFrame::height
int height
Definition: frame.h:389
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:212
buffer
Excerpt from doc/filter_design.txt: frame data is stored in buffers represented by AVFrame structures, and several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:619
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
Excerpt from doc/filter_design.txt on request_frame return values.
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:228
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:429
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1302
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:298
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:571
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
SOI
@ SOI
Definition: mjpeg.h:70
ff_mjpeg_decoder
const AVCodec ff_mjpeg_decoder
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1823
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1023
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
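A hedged sketch (tag_stereo() is a hypothetical helper) of attaching stereo 3D side data to a decoded frame, roughly what a top/bottom packed stereo JPEG would call for.

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>

static int tag_stereo(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;
    stereo->flags = AV_STEREO3D_FLAG_INVERT;  /* bottom half carries the left view */
    return 0;
}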
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:223
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:142
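A hedged sketch of the bit-writer pattern this belongs to (write_bits_example() is hypothetical; put_bits.h is a private libavcodec header): fields are written MSB-first and the tail is zero-padded to a byte boundary.

#include "libavcodec/put_bits.h"

static int write_bits_example(uint8_t *buf, int buf_size)
{
    PutBitContext pb;
    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 4, 0xA);              /* a 4-bit field  */
    put_bits(&pb, 12, 0x123);           /* a 12-bit field */
    flush_put_bits(&pb);                /* zero-pad to the next byte boundary */
    return put_bits_count(&pb) >> 3;    /* number of bytes written */
}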
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:79
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:408
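A worked example of the byte order described above, using the MKTAG() macro listed further down: the first character ends up in the least-significant byte, so MKTAG('A','B','C','D') == ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A' == 0x44434241. tag_is_abcd() is a hypothetical helper.

#include <libavutil/macros.h>   /* MKTAG() */

static int tag_is_abcd(unsigned int codec_tag)
{
    return codec_tag == MKTAG('A', 'B', 'C', 'D');
}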
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:560
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
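A small hedged example of the copy-then-free pattern (merge_metadata() is a hypothetical helper): entries from a temporary dictionary, e.g. parsed EXIF tags, are merged into a destination dictionary and the temporary one is released with av_dict_free().

#include <libavutil/dict.h>

static int merge_metadata(AVDictionary **dst, AVDictionary *tmp)
{
    int ret = av_dict_copy(dst, tmp, 0);  /* 0: default flags */
    av_dict_free(&tmp);                   /* temporary dictionary no longer needed */
    return ret;
}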
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
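A hedged sketch of the byte-reader pattern (read_segment_length() is hypothetical; bytestream.h is a private libavcodec header): reads are bounds-checked, so running past the end yields zeros instead of overreads.

#include "libavcodec/bytestream.h"

static unsigned read_segment_length(const uint8_t *buf, int size)
{
    GetByteContext gb;
    bytestream2_init(&gb, buf, size);
    return bytestream2_get_be16(&gb);   /* JPEG segment lengths are big-endian */
}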
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
Excerpt from doc/filter_design.txt; the exact code depends on how similar the blocks are and how related they are to each other.
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
AVDictionaryEntry::value
char * value
Definition: dict.h:81
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:318
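A small hedged sketch combining this check with av_image_get_buffer_size() (another public libavutil helper) to size a tightly packed image; safe_image_size() is a hypothetical helper.

#include <libavutil/imgutils.h>
#include <libavutil/pixfmt.h>

static int safe_image_size(void *logctx, int w, int h)
{
    int ret = av_image_check_size(w, h, 0, logctx);   /* reject unaddressable sizes */
    if (ret < 0)
        return ret;
    return av_image_get_buffer_size(AV_PIX_FMT_YUVJ420P, w, h, 1);
}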
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AV_RB24
Read an unsigned 24-bit big-endian value from a byte buffer.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
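As a reference, a standalone sketch of the standard lossless-JPEG predictors that a macro like this selects between (predictor 0 meaning "no prediction"); this is the textbook definition, not a verbatim copy of the macro.

/* Standard lossless JPEG predictors, selected by 'predictor' (0..7). */
static int lossless_jpeg_predict(int topleft, int top, int left, int predictor)
{
    switch (predictor) {
    case 1:  return left;
    case 2:  return top;
    case 3:  return topleft;
    case 4:  return left + top - topleft;
    case 5:  return left + ((top - topleft) >> 1);
    case 6:  return top + ((left - topleft) >> 1);
    case 7:  return (left + top) >> 1;
    default: return 0;                 /* 0: no prediction */
    }
}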
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:78