1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/display.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/opt.h"
39 #include "avcodec.h"
40 #include "blockdsp.h"
41 #include "codec_internal.h"
42 #include "copy_block.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "idctdsp.h"
46 #include "internal.h"
47 #include "jpegtables.h"
48 #include "mjpeg.h"
49 #include "mjpegdec.h"
50 #include "jpeglsdec.h"
51 #include "profiles.h"
52 #include "put_bits.h"
53 #include "tiff.h"
54 #include "exif.h"
55 #include "bytestream.h"
56 #include "tiff_common.h"
57 
58 
59 static int init_default_huffman_tables(MJpegDecodeContext *s)
60 {
61  static const struct {
62  int class;
63  int index;
64  const uint8_t *bits;
65  const uint8_t *values;
66  int length;
67  } ht[] = {
68  { 0, 0, ff_mjpeg_bits_dc_luminance,
69  ff_mjpeg_val_dc, 12 },
70  { 0, 1, ff_mjpeg_bits_dc_chrominance,
71  ff_mjpeg_val_dc, 12 },
72  { 1, 0, ff_mjpeg_bits_ac_luminance,
73  ff_mjpeg_val_ac_luminance, 162 },
74  { 1, 1, ff_mjpeg_bits_ac_chrominance,
75  ff_mjpeg_val_ac_chrominance, 162 },
76  { 2, 0, ff_mjpeg_bits_ac_luminance,
77  ff_mjpeg_val_ac_luminance, 162 },
78  { 2, 1, ff_mjpeg_bits_ac_chrominance,
79  ff_mjpeg_val_ac_chrominance, 162 },
80  };
81  int i, ret;
82 
83  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
84  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
85  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
86  ht[i].bits, ht[i].values,
87  ht[i].class == 1, s->avctx);
88  if (ret < 0)
89  return ret;
90 
91  if (ht[i].class < 2) {
92  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
93  ht[i].bits + 1, 16);
94  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
95  ht[i].values, ht[i].length);
96  }
97  }
98 
99  return 0;
100 }
101 
102 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
103 {
104  s->buggy_avid = 1;
105  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
106  s->interlace_polarity = 1;
107  if (len > 14 && buf[12] == 2) /* 2 - PAL */
108  s->interlace_polarity = 0;
109  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
110  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
111 }
112 
113 static void init_idct(AVCodecContext *avctx)
114 {
115  MJpegDecodeContext *s = avctx->priv_data;
116 
117  ff_idctdsp_init(&s->idsp, avctx);
118  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
119  s->idsp.idct_permutation);
120 }
121 
122 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
123 {
124  MJpegDecodeContext *s = avctx->priv_data;
125  int ret;
126 
127  if (!s->picture_ptr) {
128  s->picture = av_frame_alloc();
129  if (!s->picture)
130  return AVERROR(ENOMEM);
131  s->picture_ptr = s->picture;
132  }
133 
134  s->pkt = avctx->internal->in_pkt;
135 
136  s->avctx = avctx;
137  ff_blockdsp_init(&s->bdsp);
138  ff_hpeldsp_init(&s->hdsp, avctx->flags);
139  init_idct(avctx);
140  s->buffer_size = 0;
141  s->buffer = NULL;
142  s->start_code = -1;
143  s->first_picture = 1;
144  s->got_picture = 0;
145  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
146  s->orig_height = avctx->coded_height;
147  avctx->colorspace = AVCOL_SPC_BT470BG;
148  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
149 
150  if ((ret = init_default_huffman_tables(s)) < 0)
151  return ret;
152 
153  if (s->extern_huff) {
154  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
155  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
156  return ret;
157  if (ff_mjpeg_decode_dht(s)) {
158  av_log(avctx, AV_LOG_ERROR,
159  "error using external huffman table, switching back to internal\n");
160  if ((ret = init_default_huffman_tables(s)) < 0)
161  return ret;
162  }
163  }
164  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
165  s->interlace_polarity = 1; /* bottom field first */
166  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
167  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
168  if (avctx->codec_tag == AV_RL32("MJPG"))
169  s->interlace_polarity = 1;
170  }
171 
172  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
173  if (avctx->extradata_size >= 4)
174  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
175 
176  if (s->smv_frames_per_jpeg <= 0) {
177  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
178  return AVERROR_INVALIDDATA;
179  }
180 
181  s->smv_frame = av_frame_alloc();
182  if (!s->smv_frame)
183  return AVERROR(ENOMEM);
184  } else if (avctx->extradata_size > 8
185  && AV_RL32(avctx->extradata) == 0x2C
186  && AV_RL32(avctx->extradata+4) == 0x18) {
187  parse_avid(s, avctx->extradata, avctx->extradata_size);
188  }
189 
190  if (avctx->codec->id == AV_CODEC_ID_AMV)
191  s->flipped = 1;
192 
193  return 0;
194 }
195 
196 
197 /* quantize tables */
198 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
199 {
200  int len, index, i;
201 
202  len = get_bits(&s->gb, 16) - 2;
203 
204  if (8*len > get_bits_left(&s->gb)) {
205  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
206  return AVERROR_INVALIDDATA;
207  }
208 
209  while (len >= 65) {
210  int pr = get_bits(&s->gb, 4);
211  if (pr > 1) {
212  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
213  return AVERROR_INVALIDDATA;
214  }
215  index = get_bits(&s->gb, 4);
216  if (index >= 4)
217  return -1;
218  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
219  /* read quant table */
220  for (i = 0; i < 64; i++) {
221  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
222  if (s->quant_matrixes[index][i] == 0) {
223  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
224  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
225  if (s->avctx->err_recognition & AV_EF_EXPLODE)
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
241 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
242 {
243  int len, index, i, class, n, v;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  for (i = 0; i < n; i++) {
274  v = get_bits(&s->gb, 8);
275  val_table[i] = v;
276  }
277  len -= n;
278 
279  /* build VLC and flush previous vlc if present */
280  ff_free_vlc(&s->vlcs[class][index]);
281  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
282  class, index, n);
283  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
284  val_table, class > 0, s->avctx)) < 0)
285  return ret;
286 
287  if (class > 0) {
288  ff_free_vlc(&s->vlcs[2][index]);
289  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
290  val_table, 0, s->avctx)) < 0)
291  return ret;
292  }
293 
294  for (i = 0; i < 16; i++)
295  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
296  for (i = 0; i < 256; i++)
297  s->raw_huffman_values[class][index][i] = val_table[i];
298  }
299  return 0;
300 }
301 
302 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
303 {
304  int len, nb_components, i, width, height, bits, ret, size_change;
305  unsigned pix_fmt_id;
306  int h_count[MAX_COMPONENTS] = { 0 };
307  int v_count[MAX_COMPONENTS] = { 0 };
308 
309  s->cur_scan = 0;
310  memset(s->upscale_h, 0, sizeof(s->upscale_h));
311  memset(s->upscale_v, 0, sizeof(s->upscale_v));
312 
313  len = get_bits(&s->gb, 16);
314  bits = get_bits(&s->gb, 8);
315 
316  if (bits > 16 || bits < 1) {
317  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
318  return AVERROR_INVALIDDATA;
319  }
320 
321  if (s->avctx->bits_per_raw_sample != bits) {
322  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
323  s->avctx->bits_per_raw_sample = bits;
324  init_idct(s->avctx);
325  }
326  if (s->pegasus_rct)
327  bits = 9;
328  if (bits == 9 && !s->pegasus_rct)
329  s->rct = 1; // FIXME ugly
330 
331  if(s->lossless && s->avctx->lowres){
332  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
333  return -1;
334  }
335 
336  height = get_bits(&s->gb, 16);
337  width = get_bits(&s->gb, 16);
338 
339  // HACK for odd_height.mov
340  if (s->interlaced && s->width == width && s->height == height + 1)
341  height= s->height;
342 
343  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
344  if (av_image_check_size(width, height, 0, s->avctx) < 0)
345  return AVERROR_INVALIDDATA;
346  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
347  return AVERROR_INVALIDDATA;
348 
349  nb_components = get_bits(&s->gb, 8);
350  if (nb_components <= 0 ||
351  nb_components > MAX_COMPONENTS)
352  return -1;
353  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
354  if (nb_components != s->nb_components) {
355  av_log(s->avctx, AV_LOG_ERROR,
356  "nb_components changing in interlaced picture\n");
357  return AVERROR_INVALIDDATA;
358  }
359  }
360  if (s->ls && !(bits <= 8 || nb_components == 1)) {
362  "JPEG-LS that is not <= 8 "
363  "bits/component or 16-bit gray");
364  return AVERROR_PATCHWELCOME;
365  }
366  if (len != 8 + 3 * nb_components) {
367  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
368  return AVERROR_INVALIDDATA;
369  }
370 
371  s->nb_components = nb_components;
372  s->h_max = 1;
373  s->v_max = 1;
374  for (i = 0; i < nb_components; i++) {
375  /* component id */
376  s->component_id[i] = get_bits(&s->gb, 8);
377  h_count[i] = get_bits(&s->gb, 4);
378  v_count[i] = get_bits(&s->gb, 4);
379  /* compute hmax and vmax (only used in interleaved case) */
380  if (h_count[i] > s->h_max)
381  s->h_max = h_count[i];
382  if (v_count[i] > s->v_max)
383  s->v_max = v_count[i];
384  s->quant_index[i] = get_bits(&s->gb, 8);
385  if (s->quant_index[i] >= 4) {
386  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
387  return AVERROR_INVALIDDATA;
388  }
389  if (!h_count[i] || !v_count[i]) {
390  av_log(s->avctx, AV_LOG_ERROR,
391  "Invalid sampling factor in component %d %d:%d\n",
392  i, h_count[i], v_count[i]);
393  return AVERROR_INVALIDDATA;
394  }
395 
396  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
397  i, h_count[i], v_count[i],
398  s->component_id[i], s->quant_index[i]);
399  }
400  if ( nb_components == 4
401  && s->component_id[0] == 'C'
402  && s->component_id[1] == 'M'
403  && s->component_id[2] == 'Y'
404  && s->component_id[3] == 'K')
405  s->adobe_transform = 0;
406 
407  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
408  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
409  return AVERROR_PATCHWELCOME;
410  }
411 
412  if (s->bayer) {
413  if (nb_components == 2) {
414  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
415  width stored in their SOF3 markers is the width of each one. We only output
416  a single component, therefore we need to adjust the output image width. We
417  handle the deinterleaving (but not the debayering) in this file. */
418  width *= 2;
419  }
420  /* They can also contain 1 component, which is double the width and half the height
421  of the final image (rows are interleaved). We don't handle the decoding in this
422  file, but leave that to the TIFF/DNG decoder. */
423  }
424 
425  /* if different size, realloc/alloc picture */
426  if (width != s->width || height != s->height || bits != s->bits ||
427  memcmp(s->h_count, h_count, sizeof(h_count)) ||
428  memcmp(s->v_count, v_count, sizeof(v_count))) {
429  size_change = 1;
430 
431  s->width = width;
432  s->height = height;
433  s->bits = bits;
434  memcpy(s->h_count, h_count, sizeof(h_count));
435  memcpy(s->v_count, v_count, sizeof(v_count));
436  s->interlaced = 0;
437  s->got_picture = 0;
438 
439  /* test interlaced mode */
440  if (s->first_picture &&
441  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
442  s->orig_height != 0 &&
443  s->height < ((s->orig_height * 3) / 4)) {
444  s->interlaced = 1;
445  s->bottom_field = s->interlace_polarity;
446  s->picture_ptr->interlaced_frame = 1;
447  s->picture_ptr->top_field_first = !s->interlace_polarity;
448  height *= 2;
449  }
450 
451  ret = ff_set_dimensions(s->avctx, width, height);
452  if (ret < 0)
453  return ret;
454 
455  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
456  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
457  s->orig_height < height)
458  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
459 
460  s->first_picture = 0;
461  } else {
462  size_change = 0;
463  }
464 
465  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
466  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
467  if (s->avctx->height <= 0)
468  return AVERROR_INVALIDDATA;
469  }
470 
471  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
472  if (s->progressive) {
473  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
474  return AVERROR_INVALIDDATA;
475  }
476  } else {
477  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
478  s->rgb = 1;
479  else if (!s->lossless)
480  s->rgb = 0;
481  /* XXX: not complete test ! */
482  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
483  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
484  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
485  (s->h_count[3] << 4) | s->v_count[3];
486  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
487  /* NOTE we do not allocate pictures large enough for the possible
488  * padding of h/v_count being 4 */
489  if (!(pix_fmt_id & 0xD0D0D0D0))
490  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
491  if (!(pix_fmt_id & 0x0D0D0D0D))
492  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
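 /* pix_fmt_id packs the horizontal/vertical sampling factors of components
  * 0..3 into nibbles, highest bits first. The two adjustments above appear
  * to normalize the id: when every horizontal (resp. vertical) factor is
  * 0 or 2, all of them are halved, so e.g. 2x1,2x1,2x1 is matched by the
  * same case label as 1x1,1x1,1x1. */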
493 
494  for (i = 0; i < 8; i++) {
495  int j = 6 + (i&1) - (i&6);
496  int is = (pix_fmt_id >> (4*i)) & 0xF;
497  int js = (pix_fmt_id >> (4*j)) & 0xF;
498 
499  if (is == 1 && js != 2 && (i < 2 || i > 5))
500  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
501  if (is == 1 && js != 2 && (i < 2 || i > 5))
502  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
503 
504  if (is == 1 && js == 2) {
505  if (i & 1) s->upscale_h[j/2] = 1;
506  else s->upscale_v[j/2] = 1;
507  }
508  }
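 /* The loop above appears to detect mismatched sampling between paired
  * components: where one factor is 1 while its counterpart is 2, the plane
  * is flagged in upscale_h/upscale_v so it can be doubled after decoding to
  * fit the pixel format chosen below. */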
509 
510  if (s->bayer) {
511  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
512  goto unk_pixfmt;
513  }
514 
515  switch (pix_fmt_id) {
516  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
517  if (!s->bayer)
518  goto unk_pixfmt;
519  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
520  break;
521  case 0x11111100:
522  if (s->rgb)
523  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
524  else {
525  if ( s->adobe_transform == 0
526  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
527  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
528  } else {
529  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
530  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
531  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
532  }
533  }
534  av_assert0(s->nb_components == 3);
535  break;
536  case 0x11111111:
537  if (s->rgb)
538  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
539  else {
540  if (s->adobe_transform == 0 && s->bits <= 8) {
541  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
542  } else {
543  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
544  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
545  }
546  }
547  av_assert0(s->nb_components == 4);
548  break;
549  case 0x22111122:
550  case 0x22111111:
551  if (s->adobe_transform == 0 && s->bits <= 8) {
552  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
553  s->upscale_v[1] = s->upscale_v[2] = 1;
554  s->upscale_h[1] = s->upscale_h[2] = 1;
555  } else if (s->adobe_transform == 2 && s->bits <= 8) {
556  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
557  s->upscale_v[1] = s->upscale_v[2] = 1;
558  s->upscale_h[1] = s->upscale_h[2] = 1;
559  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
560  } else {
561  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
562  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
563  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
564  }
565  av_assert0(s->nb_components == 4);
566  break;
567  case 0x12121100:
568  case 0x22122100:
569  case 0x21211100:
570  case 0x21112100:
571  case 0x22211200:
572  case 0x22221100:
573  case 0x22112200:
574  case 0x11222200:
575  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
576  else
577  goto unk_pixfmt;
578  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
579  break;
580  case 0x11000000:
581  case 0x13000000:
582  case 0x14000000:
583  case 0x31000000:
584  case 0x33000000:
585  case 0x34000000:
586  case 0x41000000:
587  case 0x43000000:
588  case 0x44000000:
589  if(s->bits <= 8)
590  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
591  else
592  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
593  break;
594  case 0x12111100:
595  case 0x14121200:
596  case 0x14111100:
597  case 0x22211100:
598  case 0x22112100:
599  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
600  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
601  else
602  goto unk_pixfmt;
603  s->upscale_v[0] = s->upscale_v[1] = 1;
604  } else {
605  if (pix_fmt_id == 0x14111100)
606  s->upscale_v[1] = s->upscale_v[2] = 1;
607  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
608  else
609  goto unk_pixfmt;
610  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
611  }
612  break;
613  case 0x21111100:
614  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
615  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
616  else
617  goto unk_pixfmt;
618  s->upscale_h[0] = s->upscale_h[1] = 1;
619  } else {
620  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
621  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
622  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
623  }
624  break;
625  case 0x31111100:
626  if (s->bits > 8)
627  goto unk_pixfmt;
628  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
629  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
630  s->upscale_h[1] = s->upscale_h[2] = 2;
631  break;
632  case 0x22121100:
633  case 0x22111200:
634  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
635  else
636  goto unk_pixfmt;
637  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
638  break;
639  case 0x22111100:
640  case 0x23111100:
641  case 0x42111100:
642  case 0x24111100:
643  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
644  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
645  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
646  if (pix_fmt_id == 0x42111100) {
647  if (s->bits > 8)
648  goto unk_pixfmt;
649  s->upscale_h[1] = s->upscale_h[2] = 1;
650  } else if (pix_fmt_id == 0x24111100) {
651  if (s->bits > 8)
652  goto unk_pixfmt;
653  s->upscale_v[1] = s->upscale_v[2] = 1;
654  } else if (pix_fmt_id == 0x23111100) {
655  if (s->bits > 8)
656  goto unk_pixfmt;
657  s->upscale_v[1] = s->upscale_v[2] = 2;
658  }
659  break;
660  case 0x41111100:
661  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
662  else
663  goto unk_pixfmt;
664  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
665  break;
666  default:
667  unk_pixfmt:
668  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
669  memset(s->upscale_h, 0, sizeof(s->upscale_h));
670  memset(s->upscale_v, 0, sizeof(s->upscale_v));
671  return AVERROR_PATCHWELCOME;
672  }
673  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
674  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
675  return AVERROR_PATCHWELCOME;
676  }
677  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
678  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
679  return AVERROR_PATCHWELCOME;
680  }
681  if (s->ls) {
682  memset(s->upscale_h, 0, sizeof(s->upscale_h));
683  memset(s->upscale_v, 0, sizeof(s->upscale_v));
684  if (s->nb_components == 3) {
685  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
686  } else if (s->nb_components != 1) {
687  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
688  return AVERROR_PATCHWELCOME;
689  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
690  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
691  else if (s->bits <= 8)
692  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
693  else
694  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
695  }
696 
697  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
698  if (!s->pix_desc) {
699  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
700  return AVERROR_BUG;
701  }
702 
703  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
704  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
705  } else {
706  enum AVPixelFormat pix_fmts[] = {
707 #if CONFIG_MJPEG_NVDEC_HWACCEL
708  AV_PIX_FMT_CUDA,
709 #endif
710 #if CONFIG_MJPEG_VAAPI_HWACCEL
711  AV_PIX_FMT_VAAPI,
712 #endif
713  s->avctx->pix_fmt,
714  AV_PIX_FMT_NONE,
715  };
716  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
717  if (s->hwaccel_pix_fmt < 0)
718  return AVERROR(EINVAL);
719 
720  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
721  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
722  }
723 
724  if (s->avctx->skip_frame == AVDISCARD_ALL) {
725  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
726  s->picture_ptr->key_frame = 1;
727  s->got_picture = 1;
728  return 0;
729  }
730 
731  av_frame_unref(s->picture_ptr);
732  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
733  return -1;
734  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
735  s->picture_ptr->key_frame = 1;
736  s->got_picture = 1;
737 
738  // Let's clear the palette to avoid leaving uninitialized values in it
739  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
740  memset(s->picture_ptr->data[1], 0, 1024);
741 
742  for (i = 0; i < 4; i++)
743  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
744 
745  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
746  s->width, s->height, s->linesize[0], s->linesize[1],
747  s->interlaced, s->avctx->height);
748 
749  }
750 
751  if ((s->rgb && !s->lossless && !s->ls) ||
752  (!s->rgb && s->ls && s->nb_components > 1) ||
753  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
754  ) {
755  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
756  return AVERROR_PATCHWELCOME;
757  }
758 
759  /* totally blank picture as progressive JPEG will only add details to it */
760  if (s->progressive) {
761  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
762  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
763  for (i = 0; i < s->nb_components; i++) {
764  int size = bw * bh * s->h_count[i] * s->v_count[i];
765  av_freep(&s->blocks[i]);
766  av_freep(&s->last_nnz[i]);
767  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
768  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
769  if (!s->blocks[i] || !s->last_nnz[i])
770  return AVERROR(ENOMEM);
771  s->block_stride[i] = bw * s->h_count[i];
772  }
773  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
774  }
775 
776  if (s->avctx->hwaccel) {
777  s->hwaccel_picture_private =
778  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
779  if (!s->hwaccel_picture_private)
780  return AVERROR(ENOMEM);
781 
782  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
783  s->raw_image_buffer_size);
784  if (ret < 0)
785  return ret;
786  }
787 
788  return 0;
789 }
790 
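 /* mjpeg_decode_dc() returns the decoded DC difference, or the sentinel
  * 0xfffff (outside the range any valid difference can reach) when the
  * Huffman code is invalid; callers test for that value explicitly. */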
791 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
792 {
793  int code;
794  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
795  if (code < 0 || code > 16) {
796  av_log(s->avctx, AV_LOG_WARNING,
797  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
798  0, dc_index, &s->vlcs[0][dc_index]);
799  return 0xfffff;
800  }
801 
802  if (code)
803  return get_xbits(&s->gb, code);
804  else
805  return 0;
806 }
807 
808 /* decode block and dequantize */
809 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
810  int dc_index, int ac_index, uint16_t *quant_matrix)
811 {
812  int code, i, j, level, val;
813 
814  /* DC coef */
815  val = mjpeg_decode_dc(s, dc_index);
816  if (val == 0xfffff) {
817  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
818  return AVERROR_INVALIDDATA;
819  }
820  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
821  val = av_clip_int16(val);
822  s->last_dc[component] = val;
823  block[0] = val;
824  /* AC coefs */
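 /* Each AC symbol combines a zero-run and a magnitude size; the AC VLC
  * appears to be built (see ff_mjpeg_build_vlc) so that code >> 4 is the
  * number of positions to advance (run + 1, with EOB mapped to a jump past
  * 63) and code & 0xf is the number of extra bits holding the value. */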
825  i = 0;
826  {OPEN_READER(re, &s->gb);
827  do {
828  UPDATE_CACHE(re, &s->gb);
829  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
830 
831  i += ((unsigned)code) >> 4;
832  code &= 0xf;
833  if (code) {
834  if (code > MIN_CACHE_BITS - 16)
835  UPDATE_CACHE(re, &s->gb);
836 
837  {
838  int cache = GET_CACHE(re, &s->gb);
839  int sign = (~cache) >> 31;
840  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
841  }
842 
843  LAST_SKIP_BITS(re, &s->gb, code);
844 
845  if (i > 63) {
846  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
847  return AVERROR_INVALIDDATA;
848  }
849  j = s->permutated_scantable[i];
850  block[j] = level * quant_matrix[i];
851  }
852  } while (i < 63);
853  CLOSE_READER(re, &s->gb);}
854 
855  return 0;
856 }
857 
858 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
859  int component, int dc_index,
860  uint16_t *quant_matrix, int Al)
861 {
862  unsigned val;
863  s->bdsp.clear_block(block);
864  val = mjpeg_decode_dc(s, dc_index);
865  if (val == 0xfffff) {
866  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
867  return AVERROR_INVALIDDATA;
868  }
869  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
870  s->last_dc[component] = val;
871  block[0] = val;
872  return 0;
873 }
874 
875 /* decode block and dequantize - progressive JPEG version */
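 /* EOBRUN is the progressive-JPEG "end-of-band run": one symbol can declare
  * that this band is all zero for the current block and the following
  * EOBRUN blocks, so the counter persists across calls and is simply
  * decremented below before any bits are read. */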
876 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
877  uint8_t *last_nnz, int ac_index,
878  uint16_t *quant_matrix,
879  int ss, int se, int Al, int *EOBRUN)
880 {
881  int code, i, j, val, run;
882  unsigned level;
883 
884  if (*EOBRUN) {
885  (*EOBRUN)--;
886  return 0;
887  }
888 
889  {
890  OPEN_READER(re, &s->gb);
891  for (i = ss; ; i++) {
892  UPDATE_CACHE(re, &s->gb);
893  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
894 
895  run = ((unsigned) code) >> 4;
896  code &= 0xF;
897  if (code) {
898  i += run;
899  if (code > MIN_CACHE_BITS - 16)
900  UPDATE_CACHE(re, &s->gb);
901 
902  {
903  int cache = GET_CACHE(re, &s->gb);
904  int sign = (~cache) >> 31;
905  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
906  }
907 
908  LAST_SKIP_BITS(re, &s->gb, code);
909 
910  if (i >= se) {
911  if (i == se) {
912  j = s->permutated_scantable[se];
913  block[j] = level * (quant_matrix[se] << Al);
914  break;
915  }
916  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
917  return AVERROR_INVALIDDATA;
918  }
919  j = s->permutated_scantable[i];
920  block[j] = level * (quant_matrix[i] << Al);
921  } else {
922  if (run == 0xF) {// ZRL - skip 15 coefficients
923  i += 15;
924  if (i >= se) {
925  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
926  return AVERROR_INVALIDDATA;
927  }
928  } else {
929  val = (1 << run);
930  if (run) {
931  UPDATE_CACHE(re, &s->gb);
932  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
933  LAST_SKIP_BITS(re, &s->gb, run);
934  }
935  *EOBRUN = val - 1;
936  break;
937  }
938  }
939  }
940  CLOSE_READER(re, &s->gb);
941  }
942 
943  if (i > *last_nnz)
944  *last_nnz = i;
945 
946  return 0;
947 }
948 
949 #define REFINE_BIT(j) { \
950  UPDATE_CACHE(re, &s->gb); \
951  sign = block[j] >> 15; \
952  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
953  ((quant_matrix[i] ^ sign) - sign) << Al; \
954  LAST_SKIP_BITS(re, &s->gb, 1); \
955 }
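 /* REFINE_BIT reads one correction bit for a coefficient that is already
  * non-zero and, when the bit is set, moves it one quantized step
  * (quant_matrix[i] << Al) further from zero, in the direction of its
  * current sign (successive-approximation refinement). */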
956 
957 #define ZERO_RUN \
958 for (; ; i++) { \
959  if (i > last) { \
960  i += run; \
961  if (i > se) { \
962  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
963  return -1; \
964  } \
965  break; \
966  } \
967  j = s->permutated_scantable[i]; \
968  if (block[j]) \
969  REFINE_BIT(j) \
970  else if (run-- == 0) \
971  break; \
972 }
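 /* ZERO_RUN advances through the band, spending the decoded run count on
  * still-zero coefficients while refining any non-zero ones it passes, and
  * stops at the position where the newly decoded coefficient belongs. */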
973 
974 /* decode block and dequantize - progressive JPEG refinement pass */
975 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
976  uint8_t *last_nnz,
977  int ac_index, uint16_t *quant_matrix,
978  int ss, int se, int Al, int *EOBRUN)
979 {
980  int code, i = ss, j, sign, val, run;
981  int last = FFMIN(se, *last_nnz);
982 
983  OPEN_READER(re, &s->gb);
984  if (*EOBRUN) {
985  (*EOBRUN)--;
986  } else {
987  for (; ; i++) {
988  UPDATE_CACHE(re, &s->gb);
989  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
990 
991  if (code & 0xF) {
992  run = ((unsigned) code) >> 4;
993  UPDATE_CACHE(re, &s->gb);
994  val = SHOW_UBITS(re, &s->gb, 1);
995  LAST_SKIP_BITS(re, &s->gb, 1);
996  ZERO_RUN;
997  j = s->permutated_scantable[i];
998  val--;
999  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1000  if (i == se) {
1001  if (i > *last_nnz)
1002  *last_nnz = i;
1003  CLOSE_READER(re, &s->gb);
1004  return 0;
1005  }
1006  } else {
1007  run = ((unsigned) code) >> 4;
1008  if (run == 0xF) {
1009  ZERO_RUN;
1010  } else {
1011  val = run;
1012  run = (1 << run);
1013  if (val) {
1014  UPDATE_CACHE(re, &s->gb);
1015  run += SHOW_UBITS(re, &s->gb, val);
1016  LAST_SKIP_BITS(re, &s->gb, val);
1017  }
1018  *EOBRUN = run - 1;
1019  break;
1020  }
1021  }
1022  }
1023 
1024  if (i > *last_nnz)
1025  *last_nnz = i;
1026  }
1027 
1028  for (; i <= last; i++) {
1029  j = s->permutated_scantable[i];
1030  if (block[j])
1031  REFINE_BIT(j)
1032  }
1033  CLOSE_READER(re, &s->gb);
1034 
1035  return 0;
1036 }
1037 #undef REFINE_BIT
1038 #undef ZERO_RUN
1039 
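 /* handle_rstn() is called after each MCU; when a restart interval is set
  * it counts the interval down, byte-aligns the bitstream reader, skips
  * fill bytes and an RSTn marker if one is present, and resets the DC
  * predictors; it returns non-zero when such a resync happened. */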
1040 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1041 {
1042  int i;
1043  int reset = 0;
1044 
1045  if (s->restart_interval) {
1046  s->restart_count--;
1047  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1048  align_get_bits(&s->gb);
1049  for (i = 0; i < nb_components; i++) /* reset dc */
1050  s->last_dc[i] = (4 << s->bits);
1051  }
1052 
1053  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1054  /* skip RSTn */
1055  if (s->restart_count == 0) {
1056  if( show_bits(&s->gb, i) == (1 << i) - 1
1057  || show_bits(&s->gb, i) == 0xFF) {
1058  int pos = get_bits_count(&s->gb);
1059  align_get_bits(&s->gb);
1060  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1061  skip_bits(&s->gb, 8);
1062  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1063  for (i = 0; i < nb_components; i++) /* reset dc */
1064  s->last_dc[i] = (4 << s->bits);
1065  reset = 1;
1066  } else
1067  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1068  }
1069  }
1070  }
1071  return reset;
1072 }
1073 
1074 /* Handles 1 to 4 components */
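 /* Lossless JPEG is DPCM: each sample is predicted from its left, top and
  * top-left neighbours (PREDICT() implements the standard predictors) and
  * the Huffman-coded difference is added to that prediction. The scan below
  * forces predictor 1 on the first row/column and around restart resyncs,
  * where the usual neighbours are not available. */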
1075 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1076 {
1077  int i, mb_x, mb_y;
1078  unsigned width;
1079  uint16_t (*buffer)[4];
1080  int left[4], top[4], topleft[4];
1081  const int linesize = s->linesize[0];
1082  const int mask = ((1 << s->bits) - 1) << point_transform;
1083  int resync_mb_y = 0;
1084  int resync_mb_x = 0;
1085  int vpred[6];
1086 
1087  if (!s->bayer && s->nb_components < 3)
1088  return AVERROR_INVALIDDATA;
1089  if (s->bayer && s->nb_components > 2)
1090  return AVERROR_INVALIDDATA;
1091  if (s->nb_components <= 0 || s->nb_components > 4)
1092  return AVERROR_INVALIDDATA;
1093  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1094  return AVERROR_INVALIDDATA;
1095  if (s->bayer) {
1096  if (s->rct || s->pegasus_rct)
1097  return AVERROR_INVALIDDATA;
1098  }
1099 
1100 
1101  s->restart_count = s->restart_interval;
1102 
1103  if (s->restart_interval == 0)
1104  s->restart_interval = INT_MAX;
1105 
1106  if (s->bayer)
1107  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1108  else
1109  width = s->mb_width;
1110 
1111  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1112  if (!s->ljpeg_buffer)
1113  return AVERROR(ENOMEM);
1114 
1115  buffer = s->ljpeg_buffer;
1116 
1117  for (i = 0; i < 4; i++)
1118  buffer[0][i] = 1 << (s->bits - 1);
1119 
1120  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1121  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1122 
1123  if (s->interlaced && s->bottom_field)
1124  ptr += linesize >> 1;
1125 
1126  for (i = 0; i < 4; i++)
1127  top[i] = left[i] = topleft[i] = buffer[0][i];
1128 
1129  if ((mb_y * s->width) % s->restart_interval == 0) {
1130  for (i = 0; i < 6; i++)
1131  vpred[i] = 1 << (s->bits-1);
1132  }
1133 
1134  for (mb_x = 0; mb_x < width; mb_x++) {
1135  int modified_predictor = predictor;
1136 
1137  if (get_bits_left(&s->gb) < 1) {
1138  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1139  return AVERROR_INVALIDDATA;
1140  }
1141 
1142  if (s->restart_interval && !s->restart_count){
1143  s->restart_count = s->restart_interval;
1144  resync_mb_x = mb_x;
1145  resync_mb_y = mb_y;
1146  for(i=0; i<4; i++)
1147  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1148  }
1149  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1150  modified_predictor = 1;
1151 
1152  for (i=0;i<nb_components;i++) {
1153  int pred, dc;
1154 
1155  topleft[i] = top[i];
1156  top[i] = buffer[mb_x][i];
1157 
1158  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1159  if(dc == 0xFFFFF)
1160  return -1;
1161 
1162  if (!s->bayer || mb_x) {
1163  pred = left[i];
1164  } else { /* This path runs only for the first line in bayer images */
1165  vpred[i] += dc;
1166  pred = vpred[i] - dc;
1167  }
1168 
1169  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1170 
1171  left[i] = buffer[mb_x][i] =
1172  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1173  }
1174 
1175  if (s->restart_interval && !--s->restart_count) {
1176  align_get_bits(&s->gb);
1177  skip_bits(&s->gb, 16); /* skip RSTn */
1178  }
1179  }
1180  if (s->rct && s->nb_components == 4) {
1181  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1182  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1183  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1184  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1185  ptr[4*mb_x + 0] = buffer[mb_x][3];
1186  }
1187  } else if (s->nb_components == 4) {
1188  for(i=0; i<nb_components; i++) {
1189  int c= s->comp_index[i];
1190  if (s->bits <= 8) {
1191  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1192  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1193  }
1194  } else if(s->bits == 9) {
1195  return AVERROR_PATCHWELCOME;
1196  } else {
1197  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1198  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1199  }
1200  }
1201  }
1202  } else if (s->rct) {
1203  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1204  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1205  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1206  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1207  }
1208  } else if (s->pegasus_rct) {
1209  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1210  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1211  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1212  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1213  }
1214  } else if (s->bayer) {
1215  if (s->bits <= 8)
1216  return AVERROR_PATCHWELCOME;
1217  if (nb_components == 1) {
1218  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1219  for (mb_x = 0; mb_x < width; mb_x++)
1220  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1221  } else if (nb_components == 2) {
1222  for (mb_x = 0; mb_x < width; mb_x++) {
1223  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1224  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1225  }
1226  }
1227  } else {
1228  for(i=0; i<nb_components; i++) {
1229  int c= s->comp_index[i];
1230  if (s->bits <= 8) {
1231  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1232  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1233  }
1234  } else if(s->bits == 9) {
1235  return AVERROR_PATCHWELCOME;
1236  } else {
1237  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1238  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1239  }
1240  }
1241  }
1242  }
1243  }
1244  return 0;
1245 }
1246 
1247 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1248  int point_transform, int nb_components)
1249 {
1250  int i, mb_x, mb_y, mask;
1251  int bits= (s->bits+7)&~7;
1252  int resync_mb_y = 0;
1253  int resync_mb_x = 0;
1254 
1255  point_transform += bits - s->bits;
1256  mask = ((1 << s->bits) - 1) << point_transform;
1257 
1258  av_assert0(nb_components>=1 && nb_components<=4);
1259 
1260  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1261  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1262  if (get_bits_left(&s->gb) < 1) {
1263  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1264  return AVERROR_INVALIDDATA;
1265  }
1266  if (s->restart_interval && !s->restart_count){
1267  s->restart_count = s->restart_interval;
1268  resync_mb_x = mb_x;
1269  resync_mb_y = mb_y;
1270  }
1271 
1272  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1273  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1274  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1275  for (i = 0; i < nb_components; i++) {
1276  uint8_t *ptr;
1277  uint16_t *ptr16;
1278  int n, h, v, x, y, c, j, linesize;
1279  n = s->nb_blocks[i];
1280  c = s->comp_index[i];
1281  h = s->h_scount[i];
1282  v = s->v_scount[i];
1283  x = 0;
1284  y = 0;
1285  linesize= s->linesize[c];
1286 
1287  if(bits>8) linesize /= 2;
1288 
1289  for(j=0; j<n; j++) {
1290  int pred, dc;
1291 
1292  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1293  if(dc == 0xFFFFF)
1294  return -1;
1295  if ( h * mb_x + x >= s->width
1296  || v * mb_y + y >= s->height) {
1297  // Nothing to do
1298  } else if (bits<=8) {
1299  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1300  if(y==0 && toprow){
1301  if(x==0 && leftcol){
1302  pred= 1 << (bits - 1);
1303  }else{
1304  pred= ptr[-1];
1305  }
1306  }else{
1307  if(x==0 && leftcol){
1308  pred= ptr[-linesize];
1309  }else{
1310  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1311  }
1312  }
1313 
1314  if (s->interlaced && s->bottom_field)
1315  ptr += linesize >> 1;
1316  pred &= mask;
1317  *ptr= pred + ((unsigned)dc << point_transform);
1318  }else{
1319  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1320  if(y==0 && toprow){
1321  if(x==0 && leftcol){
1322  pred= 1 << (bits - 1);
1323  }else{
1324  pred= ptr16[-1];
1325  }
1326  }else{
1327  if(x==0 && leftcol){
1328  pred= ptr16[-linesize];
1329  }else{
1330  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1331  }
1332  }
1333 
1334  if (s->interlaced && s->bottom_field)
1335  ptr16 += linesize >> 1;
1336  pred &= mask;
1337  *ptr16= pred + ((unsigned)dc << point_transform);
1338  }
1339  if (++x == h) {
1340  x = 0;
1341  y++;
1342  }
1343  }
1344  }
1345  } else {
1346  for (i = 0; i < nb_components; i++) {
1347  uint8_t *ptr;
1348  uint16_t *ptr16;
1349  int n, h, v, x, y, c, j, linesize, dc;
1350  n = s->nb_blocks[i];
1351  c = s->comp_index[i];
1352  h = s->h_scount[i];
1353  v = s->v_scount[i];
1354  x = 0;
1355  y = 0;
1356  linesize = s->linesize[c];
1357 
1358  if(bits>8) linesize /= 2;
1359 
1360  for (j = 0; j < n; j++) {
1361  int pred;
1362 
1363  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1364  if(dc == 0xFFFFF)
1365  return -1;
1366  if ( h * mb_x + x >= s->width
1367  || v * mb_y + y >= s->height) {
1368  // Nothing to do
1369  } else if (bits<=8) {
1370  ptr = s->picture_ptr->data[c] +
1371  (linesize * (v * mb_y + y)) +
1372  (h * mb_x + x); //FIXME optimize this crap
1373  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1374 
1375  pred &= mask;
1376  *ptr = pred + ((unsigned)dc << point_transform);
1377  }else{
1378  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1379  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1380 
1381  pred &= mask;
1382  *ptr16= pred + ((unsigned)dc << point_transform);
1383  }
1384 
1385  if (++x == h) {
1386  x = 0;
1387  y++;
1388  }
1389  }
1390  }
1391  }
1392  if (s->restart_interval && !--s->restart_count) {
1393  align_get_bits(&s->gb);
1394  skip_bits(&s->gb, 16); /* skip RSTn */
1395  }
1396  }
1397  }
1398  return 0;
1399 }
1400 
1401 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1402  uint8_t *dst, const uint8_t *src,
1403  int linesize, int lowres)
1404 {
1405  switch (lowres) {
1406  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1407  break;
1408  case 1: copy_block4(dst, src, linesize, linesize, 4);
1409  break;
1410  case 2: copy_block2(dst, src, linesize, linesize, 2);
1411  break;
1412  case 3: *dst = *src;
1413  break;
1414  }
1415 }
1416 
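 /* When the sample precision is not a multiple of 8, the decoded values sit
  * in the low bits; shift_output() left-shifts every sample of the block
  * just written so it spans the full 8- or 16-bit range of the output
  * pixel format. */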
1417 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1418 {
1419  int block_x, block_y;
1420  int size = 8 >> s->avctx->lowres;
1421  if (s->bits > 8) {
1422  for (block_y=0; block_y<size; block_y++)
1423  for (block_x=0; block_x<size; block_x++)
1424  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1425  } else {
1426  for (block_y=0; block_y<size; block_y++)
1427  for (block_x=0; block_x<size; block_x++)
1428  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1429  }
1430 }
1431 
1432 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1433  int Al, const uint8_t *mb_bitmask,
1434  int mb_bitmask_size,
1435  const AVFrame *reference)
1436 {
1437  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1438  uint8_t *data[MAX_COMPONENTS];
1439  const uint8_t *reference_data[MAX_COMPONENTS];
1440  int linesize[MAX_COMPONENTS];
1441  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1442  int bytes_per_pixel = 1 + (s->bits > 8);
1443 
1444  if (mb_bitmask) {
1445  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1446  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1447  return AVERROR_INVALIDDATA;
1448  }
1449  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1450  }
1451 
1452  s->restart_count = 0;
1453 
1454  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1455  &chroma_v_shift);
1456  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1457  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1458 
1459  for (i = 0; i < nb_components; i++) {
1460  int c = s->comp_index[i];
1461  data[c] = s->picture_ptr->data[c];
1462  reference_data[c] = reference ? reference->data[c] : NULL;
1463  linesize[c] = s->linesize[c];
1464  s->coefs_finished[c] |= 1;
1465  }
1466 
1467  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1468  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1469  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1470 
1471  if (s->restart_interval && !s->restart_count)
1472  s->restart_count = s->restart_interval;
1473 
1474  if (get_bits_left(&s->gb) < 0) {
1475  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1476  -get_bits_left(&s->gb));
1477  return AVERROR_INVALIDDATA;
1478  }
1479  for (i = 0; i < nb_components; i++) {
1480  uint8_t *ptr;
1481  int n, h, v, x, y, c, j;
1482  int block_offset;
1483  n = s->nb_blocks[i];
1484  c = s->comp_index[i];
1485  h = s->h_scount[i];
1486  v = s->v_scount[i];
1487  x = 0;
1488  y = 0;
1489  for (j = 0; j < n; j++) {
1490  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1491  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1492 
1493  if (s->interlaced && s->bottom_field)
1494  block_offset += linesize[c] >> 1;
1495  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1496  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1497  ptr = data[c] + block_offset;
1498  } else
1499  ptr = NULL;
1500  if (!s->progressive) {
1501  if (copy_mb) {
1502  if (ptr)
1503  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1504  linesize[c], s->avctx->lowres);
1505 
1506  } else {
1507  s->bdsp.clear_block(s->block);
1508  if (decode_block(s, s->block, i,
1509  s->dc_index[i], s->ac_index[i],
1510  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1511  av_log(s->avctx, AV_LOG_ERROR,
1512  "error y=%d x=%d\n", mb_y, mb_x);
1513  return AVERROR_INVALIDDATA;
1514  }
1515  if (ptr) {
1516  s->idsp.idct_put(ptr, linesize[c], s->block);
1517  if (s->bits & 7)
1518  shift_output(s, ptr, linesize[c]);
1519  }
1520  }
1521  } else {
1522  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1523  (h * mb_x + x);
1524  int16_t *block = s->blocks[c][block_idx];
1525  if (Ah)
1526  block[0] += get_bits1(&s->gb) *
1527  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1528  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1529  s->quant_matrixes[s->quant_sindex[i]],
1530  Al) < 0) {
1531  av_log(s->avctx, AV_LOG_ERROR,
1532  "error y=%d x=%d\n", mb_y, mb_x);
1533  return AVERROR_INVALIDDATA;
1534  }
1535  }
1536  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1537  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1538  mb_x, mb_y, x, y, c, s->bottom_field,
1539  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1540  if (++x == h) {
1541  x = 0;
1542  y++;
1543  }
1544  }
1545  }
1546 
1547  handle_rstn(s, nb_components);
1548  }
1549  }
1550  return 0;
1551 }
1552 
1553 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1554  int se, int Ah, int Al)
1555 {
1556  int mb_x, mb_y;
1557  int EOBRUN = 0;
1558  int c = s->comp_index[0];
1559  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1560 
1561  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1562  if (se < ss || se > 63) {
1563  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1564  return AVERROR_INVALIDDATA;
1565  }
1566 
1567  // s->coefs_finished is a bitmask for coefficients coded
1568  // ss and se are parameters telling start and end coefficients
1569  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
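 /* (2ULL << se) - (1ULL << ss) sets bits ss..se inclusive (e.g. ss=1, se=5
  * gives 0b111110), so coefs_finished records which spectral coefficients
  * of this component have been covered by some scan. */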
1570 
1571  s->restart_count = 0;
1572 
1573  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1574  int block_idx = mb_y * s->block_stride[c];
1575  int16_t (*block)[64] = &s->blocks[c][block_idx];
1576  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1577  if (get_bits_left(&s->gb) <= 0) {
1578  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1579  return AVERROR_INVALIDDATA;
1580  }
1581  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1582  int ret;
1583  if (s->restart_interval && !s->restart_count)
1584  s->restart_count = s->restart_interval;
1585 
1586  if (Ah)
1587  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1588  quant_matrix, ss, se, Al, &EOBRUN);
1589  else
1590  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1591  quant_matrix, ss, se, Al, &EOBRUN);
1592 
1593  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1594  ret = AVERROR_INVALIDDATA;
1595  if (ret < 0) {
1596  av_log(s->avctx, AV_LOG_ERROR,
1597  "error y=%d x=%d\n", mb_y, mb_x);
1598  return AVERROR_INVALIDDATA;
1599  }
1600 
1601  if (handle_rstn(s, 0))
1602  EOBRUN = 0;
1603  }
1604  }
1605  return 0;
1606 }
1607 
1608 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1609 {
1610  int mb_x, mb_y;
1611  int c;
1612  const int bytes_per_pixel = 1 + (s->bits > 8);
1613  const int block_size = s->lossless ? 1 : 8;
1614 
1615  for (c = 0; c < s->nb_components; c++) {
1616  uint8_t *data = s->picture_ptr->data[c];
1617  int linesize = s->linesize[c];
1618  int h = s->h_max / s->h_count[c];
1619  int v = s->v_max / s->v_count[c];
1620  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1621  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1622 
1623  if (~s->coefs_finished[c])
1624  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1625 
1626  if (s->interlaced && s->bottom_field)
1627  data += linesize >> 1;
1628 
1629  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1630  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1631  int block_idx = mb_y * s->block_stride[c];
1632  int16_t (*block)[64] = &s->blocks[c][block_idx];
1633  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1634  s->idsp.idct_put(ptr, linesize, *block);
1635  if (s->bits & 7)
1636  shift_output(s, ptr, linesize);
1637  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1638  }
1639  }
1640  }
1641 }
1642 
1643 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1644  int mb_bitmask_size, const AVFrame *reference)
1645 {
1646  int len, nb_components, i, h, v, predictor, point_transform;
1647  int index, id, ret;
1648  const int block_size = s->lossless ? 1 : 8;
1649  int ilv, prev_shift;
1650 
1651  if (!s->got_picture) {
1652  av_log(s->avctx, AV_LOG_WARNING,
1653  "Can not process SOS before SOF, skipping\n");
1654  return -1;
1655  }
1656 
1657  if (reference) {
1658  if (reference->width != s->picture_ptr->width ||
1659  reference->height != s->picture_ptr->height ||
1660  reference->format != s->picture_ptr->format) {
1661  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1662  return AVERROR_INVALIDDATA;
1663  }
1664  }
1665 
1666  /* XXX: verify len field validity */
1667  len = get_bits(&s->gb, 16);
1668  nb_components = get_bits(&s->gb, 8);
1669  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1671  "decode_sos: nb_components (%d)",
1672  nb_components);
1673  return AVERROR_PATCHWELCOME;
1674  }
1675  if (len != 6 + 2 * nb_components) {
1676  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1677  return AVERROR_INVALIDDATA;
1678  }
1679  for (i = 0; i < nb_components; i++) {
1680  id = get_bits(&s->gb, 8);
1681  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1682  /* find component index */
1683  for (index = 0; index < s->nb_components; index++)
1684  if (id == s->component_id[index])
1685  break;
1686  if (index == s->nb_components) {
1687  av_log(s->avctx, AV_LOG_ERROR,
1688  "decode_sos: index(%d) out of components\n", index);
1689  return AVERROR_INVALIDDATA;
1690  }
1691  /* Metasoft MJPEG codec has Cb and Cr swapped */
1692  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1693  && nb_components == 3 && s->nb_components == 3 && i)
1694  index = 3 - i;
1695 
1696  s->quant_sindex[i] = s->quant_index[index];
1697  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1698  s->h_scount[i] = s->h_count[index];
1699  s->v_scount[i] = s->v_count[index];
1700 
1701  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1702  index = (index+2)%3;
1703 
1704  s->comp_index[i] = index;
1705 
1706  s->dc_index[i] = get_bits(&s->gb, 4);
1707  s->ac_index[i] = get_bits(&s->gb, 4);
1708 
1709  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1710  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1711  goto out_of_range;
1712  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1713  goto out_of_range;
1714  }
1715 
1716  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1717  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1718  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1719  prev_shift = get_bits(&s->gb, 4); /* Ah */
1720  point_transform = get_bits(&s->gb, 4); /* Al */
1721  }else
1722  prev_shift = point_transform = 0;
1723 
1724  if (nb_components > 1) {
1725  /* interleaved stream */
1726  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1727  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1728  } else if (!s->ls) { /* skip this for JPEG-LS */
1729  h = s->h_max / s->h_scount[0];
1730  v = s->v_max / s->v_scount[0];
1731  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1732  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1733  s->nb_blocks[0] = 1;
1734  s->h_scount[0] = 1;
1735  s->v_scount[0] = 1;
1736  }
1737 
1738  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1739  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1740  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1741  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1742  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1743 
1744 
1745  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1746  for (i = s->mjpb_skiptosod; i > 0; i--)
1747  skip_bits(&s->gb, 8);
1748 
1749 next_field:
1750  for (i = 0; i < nb_components; i++)
1751  s->last_dc[i] = (4 << s->bits);
1752 
1753  if (s->avctx->hwaccel) {
1754  int bytes_to_start = get_bits_count(&s->gb) / 8;
1755  av_assert0(bytes_to_start >= 0 &&
1756  s->raw_scan_buffer_size >= bytes_to_start);
1757 
1758  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1759  s->raw_scan_buffer + bytes_to_start,
1760  s->raw_scan_buffer_size - bytes_to_start);
1761  if (ret < 0)
1762  return ret;
1763 
1764  } else if (s->lossless) {
1765  av_assert0(s->picture_ptr == s->picture);
1766  if (CONFIG_JPEGLS_DECODER && s->ls) {
1767 // for () {
1768 // reset_ls_coding_parameters(s, 0);
1769 
1770  if ((ret = ff_jpegls_decode_picture(s, predictor,
1771  point_transform, ilv)) < 0)
1772  return ret;
1773  } else {
1774  if (s->rgb || s->bayer) {
1775  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1776  return ret;
1777  } else {
1778  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1779  point_transform,
1780  nb_components)) < 0)
1781  return ret;
1782  }
1783  }
1784  } else {
1785  if (s->progressive && predictor) {
1786  av_assert0(s->picture_ptr == s->picture);
1787  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1788  ilv, prev_shift,
1789  point_transform)) < 0)
1790  return ret;
1791  } else {
1792  if ((ret = mjpeg_decode_scan(s, nb_components,
1793  prev_shift, point_transform,
1794  mb_bitmask, mb_bitmask_size, reference)) < 0)
1795  return ret;
1796  }
1797  }
1798 
1799  if (s->interlaced &&
1800  get_bits_left(&s->gb) > 32 &&
1801  show_bits(&s->gb, 8) == 0xFF) {
1802  GetBitContext bak = s->gb;
1803  align_get_bits(&bak);
1804  if (show_bits(&bak, 16) == 0xFFD1) {
1805  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1806  s->gb = bak;
1807  skip_bits(&s->gb, 16);
1808  s->bottom_field ^= 1;
1809 
1810  goto next_field;
1811  }
1812  }
1813 
1814  emms_c();
1815  return 0;
1816  out_of_range:
1817  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1818  return AVERROR_INVALIDDATA;
1819 }
1820 
1821 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1822 {
1823  if (get_bits(&s->gb, 16) != 4)
1824  return AVERROR_INVALIDDATA;
1825  s->restart_interval = get_bits(&s->gb, 16);
1826  s->restart_count = 0;
1827  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1828  s->restart_interval);
1829 
1830  return 0;
1831 }
1832 
1833 static int mjpeg_decode_app(MJpegDecodeContext *s)
1834 {
1835  int len, id, i;
1836 
1837  len = get_bits(&s->gb, 16);
1838  if (len < 6) {
1839  if (s->bayer) {
1840  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1841  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1842  skip_bits(&s->gb, len);
1843  return 0;
1844  } else
1845  return AVERROR_INVALIDDATA;
1846  }
1847  if (8 * len > get_bits_left(&s->gb))
1848  return AVERROR_INVALIDDATA;
1849 
1850  id = get_bits_long(&s->gb, 32);
1851  len -= 6;
1852 
1853  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1854  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1855  av_fourcc2str(av_bswap32(id)), id, len);
1856 
1857  /* Buggy AVID: it puts EOI only at every 10th frame. */
1858  /* Also, this fourcc is used by non-AVID files too; it holds some
1859  information, but it is always present in AVID-created files. */
1860  if (id == AV_RB32("AVI1")) {
1861  /* structure:
1862  4bytes AVI1
1863  1bytes polarity
1864  1bytes always zero
1865  4bytes field_size
1866  4bytes field_size_less_padding
1867  */
1868  s->buggy_avid = 1;
1869  i = get_bits(&s->gb, 8); len--;
1870  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1871  goto out;
1872  }
1873 
1874  if (id == AV_RB32("JFIF")) {
1875  int t_w, t_h, v1, v2;
1876  if (len < 8)
1877  goto out;
1878  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1879  v1 = get_bits(&s->gb, 8);
1880  v2 = get_bits(&s->gb, 8);
1881  skip_bits(&s->gb, 8);
1882 
1883  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1884  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1885  if ( s->avctx->sample_aspect_ratio.num <= 0
1886  || s->avctx->sample_aspect_ratio.den <= 0) {
1887  s->avctx->sample_aspect_ratio.num = 0;
1888  s->avctx->sample_aspect_ratio.den = 1;
1889  }
1890 
1891  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1892  av_log(s->avctx, AV_LOG_INFO,
1893  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1894  v1, v2,
1895  s->avctx->sample_aspect_ratio.num,
1896  s->avctx->sample_aspect_ratio.den);
1897 
1898  len -= 8;
1899  if (len >= 2) {
1900  t_w = get_bits(&s->gb, 8);
1901  t_h = get_bits(&s->gb, 8);
1902  if (t_w && t_h) {
1903  /* skip thumbnail */
1904  if (len - 10 - (t_w * t_h * 3) > 0)
1905  len -= t_w * t_h * 3;
1906  }
1907  len -= 2;
1908  }
1909  goto out;
1910  }
1911 
1912  if ( id == AV_RB32("Adob")
1913  && len >= 7
1914  && show_bits(&s->gb, 8) == 'e'
1915  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1916  skip_bits(&s->gb, 8); /* 'e' */
1917  skip_bits(&s->gb, 16); /* version */
1918  skip_bits(&s->gb, 16); /* flags0 */
1919  skip_bits(&s->gb, 16); /* flags1 */
1920  s->adobe_transform = get_bits(&s->gb, 8);
1921  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1922  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1923  len -= 7;
1924  goto out;
1925  }
1926 
1927  if (id == AV_RB32("LJIF")) {
1928  int rgb = s->rgb;
1929  int pegasus_rct = s->pegasus_rct;
1930  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1931  av_log(s->avctx, AV_LOG_INFO,
1932  "Pegasus lossless jpeg header found\n");
1933  skip_bits(&s->gb, 16); /* version ? */
1934  skip_bits(&s->gb, 16); /* unknown always 0? */
1935  skip_bits(&s->gb, 16); /* unknown always 0? */
1936  skip_bits(&s->gb, 16); /* unknown always 0? */
1937  switch (i=get_bits(&s->gb, 8)) {
1938  case 1:
1939  rgb = 1;
1940  pegasus_rct = 0;
1941  break;
1942  case 2:
1943  rgb = 1;
1944  pegasus_rct = 1;
1945  break;
1946  default:
1947  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1948  }
1949 
1950  len -= 9;
1951  if (s->bayer)
1952  goto out;
1953  if (s->got_picture)
1954  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1955  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1956  goto out;
1957  }
1958 
1959  s->rgb = rgb;
1960  s->pegasus_rct = pegasus_rct;
1961 
1962  goto out;
1963  }
1964  if (id == AV_RL32("colr") && len > 0) {
1965  s->colr = get_bits(&s->gb, 8);
1966  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1967  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1968  len --;
1969  goto out;
1970  }
1971  if (id == AV_RL32("xfrm") && len > 0) {
1972  s->xfrm = get_bits(&s->gb, 8);
1973  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1974  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1975  len --;
1976  goto out;
1977  }
1978 
1979  /* JPS extension by VRex */
1980  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1981  int flags, layout, type;
1982  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1983  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1984 
1985  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1986  skip_bits(&s->gb, 16); len -= 2; /* block length */
1987  skip_bits(&s->gb, 8); /* reserved */
1988  flags = get_bits(&s->gb, 8);
1989  layout = get_bits(&s->gb, 8);
1990  type = get_bits(&s->gb, 8);
1991  len -= 4;
1992 
1993  av_freep(&s->stereo3d);
1994  s->stereo3d = av_stereo3d_alloc();
1995  if (!s->stereo3d) {
1996  goto out;
1997  }
1998  if (type == 0) {
1999  s->stereo3d->type = AV_STEREO3D_2D;
2000  } else if (type == 1) {
2001  switch (layout) {
2002  case 0x01:
2003  s->stereo3d->type = AV_STEREO3D_LINES;
2004  break;
2005  case 0x02:
2006  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2007  break;
2008  case 0x03:
2009  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2010  break;
2011  }
2012  if (!(flags & 0x04)) {
2013  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2014  }
2015  }
2016  goto out;
2017  }
2018 
2019  /* EXIF metadata */
2020  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2021  GetByteContext gbytes;
2022  int ret, le, ifd_offset, bytes_read;
2023  const uint8_t *aligned;
2024 
2025  skip_bits(&s->gb, 16); // skip padding
2026  len -= 2;
2027 
2028  // init byte wise reading
2029  aligned = align_get_bits(&s->gb);
2030  bytestream2_init(&gbytes, aligned, len);
2031 
2032  // read TIFF header
2033  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2034  if (ret) {
2035  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2036  } else {
2037  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2038 
2039  // read 0th IFD and store the metadata
2040  // (return values > 0 indicate the presence of subimage metadata)
2041  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2042  if (ret < 0) {
2043  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2044  }
2045  }
2046 
2047  bytes_read = bytestream2_tell(&gbytes);
2048  skip_bits(&s->gb, bytes_read << 3);
2049  len -= bytes_read;
2050 
2051  goto out;
2052  }
2053 
2054  /* Apple MJPEG-A */
2055  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2056  id = get_bits_long(&s->gb, 32);
2057  len -= 4;
2058  /* Apple MJPEG-A */
2059  if (id == AV_RB32("mjpg")) {
2060  /* structure:
2061  4bytes field size
2062  4bytes pad field size
2063  4bytes next off
2064  4bytes quant off
2065  4bytes huff off
2066  4bytes image off
2067  4bytes scan off
2068  4bytes data off
2069  */
2070  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2071  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2072  }
2073  }
2074 
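 /* ICC profiles may be split across multiple APP2 markers; each chunk carries a 1-based sequence number and the total chunk count, so the chunks are buffered here and reassembled once all of them have arrived */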
2075  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2076  int id2;
2077  unsigned seqno;
2078  unsigned nummarkers;
2079 
2080  id = get_bits_long(&s->gb, 32);
2081  id2 = get_bits(&s->gb, 24);
2082  len -= 7;
2083  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2084  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2085  goto out;
2086  }
2087 
2088  skip_bits(&s->gb, 8);
2089  seqno = get_bits(&s->gb, 8);
2090  len -= 2;
2091  if (seqno == 0) {
2092  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2093  goto out;
2094  }
2095 
2096  nummarkers = get_bits(&s->gb, 8);
2097  len -= 1;
2098  if (nummarkers == 0) {
2099  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2100  goto out;
2101  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2102  av_log(s->avctx, AV_LOG_WARNING, "Mistmatch in coded number of ICC markers between markers\n");
2103  goto out;
2104  } else if (seqno > nummarkers) {
2105  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2106  goto out;
2107  }
2108 
2109  /* Allocate if this is the first APP2 we've seen. */
2110  if (s->iccnum == 0) {
2111  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2112  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2113  return AVERROR(ENOMEM);
2114  }
2115  s->iccnum = nummarkers;
2116  }
2117 
2118  if (s->iccentries[seqno - 1].data) {
2119  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2120  goto out;
2121  }
2122 
2123  s->iccentries[seqno - 1].length = len;
2124  s->iccentries[seqno - 1].data = av_malloc(len);
2125  if (!s->iccentries[seqno - 1].data) {
2126  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2127  return AVERROR(ENOMEM);
2128  }
2129 
2130  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2131  skip_bits(&s->gb, len << 3);
2132  len = 0;
2133  s->iccread++;
2134 
2135  if (s->iccread > s->iccnum)
2136  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2137  }
2138 
2139 out:
2140  /* slow but needed for extreme adobe jpegs */
2141  if (len < 0)
2142  av_log(s->avctx, AV_LOG_ERROR,
2143  "mjpeg: error, decode_app parser read over the end\n");
2144  while (--len > 0)
2145  skip_bits(&s->gb, 8);
2146 
2147  return 0;
2148 }
2149 
2150 static int mjpeg_decode_com(MJpegDecodeContext *s)
2151 {
2152  int len = get_bits(&s->gb, 16);
2153  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2154  int i;
2155  char *cbuf = av_malloc(len - 1);
2156  if (!cbuf)
2157  return AVERROR(ENOMEM);
2158 
2159  for (i = 0; i < len - 2; i++)
2160  cbuf[i] = get_bits(&s->gb, 8);
2161  if (i > 0 && cbuf[i - 1] == '\n')
2162  cbuf[i - 1] = 0;
2163  else
2164  cbuf[i] = 0;
2165 
2166  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2167  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2168 
2169  /* buggy avid, it puts EOI only at every 10th frame */
2170  if (!strncmp(cbuf, "AVID", 4)) {
2171  parse_avid(s, cbuf, len);
2172  } else if (!strcmp(cbuf, "CS=ITU601"))
2173  s->cs_itu601 = 1;
2174  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2175  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2176  s->flipped = 1;
2177  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2178  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2179  s->multiscope = 2;
2180  }
2181 
2182  av_free(cbuf);
2183  }
2184 
2185  return 0;
2186 }
2187 
2188 /* return the 8 bit start code value and update the search
2189  state. Return -1 if no start code found */
2190 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2191 {
2192  const uint8_t *buf_ptr;
2193  unsigned int v, v2;
2194  int val;
2195  int skipped = 0;
2196 
2197  buf_ptr = *pbuf_ptr;
2198  while (buf_end - buf_ptr > 1) {
2199  v = *buf_ptr++;
2200  v2 = *buf_ptr;
2201  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2202  val = *buf_ptr++;
2203  goto found;
2204  }
2205  skipped++;
2206  }
2207  buf_ptr = buf_end;
2208  val = -1;
2209 found:
2210  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2211  *pbuf_ptr = buf_ptr;
2212  return val;
2213 }
2214 
2215 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2216  const uint8_t **buf_ptr, const uint8_t *buf_end,
2217  const uint8_t **unescaped_buf_ptr,
2218  int *unescaped_buf_size)
2219 {
2220  int start_code;
2221  start_code = find_marker(buf_ptr, buf_end);
2222 
2223  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2224  if (!s->buffer)
2225  return AVERROR(ENOMEM);
2226 
2227  /* unescape buffer of SOS, use special treatment for JPEG-LS */
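 /* in the entropy-coded segment a literal 0xFF byte is stored as 0xFF 0x00; the loop below drops the stuffing/fill bytes and stops at the first real marker other than RST0..RST7 */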
2228  if (start_code == SOS && !s->ls) {
2229  const uint8_t *src = *buf_ptr;
2230  const uint8_t *ptr = src;
2231  uint8_t *dst = s->buffer;
2232 
2233  #define copy_data_segment(skip) do { \
2234  ptrdiff_t length = (ptr - src) - (skip); \
2235  if (length > 0) { \
2236  memcpy(dst, src, length); \
2237  dst += length; \
2238  src = ptr; \
2239  } \
2240  } while (0)
2241 
2242  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2243  ptr = buf_end;
2244  copy_data_segment(0);
2245  } else {
2246  while (ptr < buf_end) {
2247  uint8_t x = *(ptr++);
2248 
2249  if (x == 0xff) {
2250  ptrdiff_t skip = 0;
2251  while (ptr < buf_end && x == 0xff) {
2252  x = *(ptr++);
2253  skip++;
2254  }
2255 
2256  /* 0xFF, 0xFF, ... */
2257  if (skip > 1) {
2258  copy_data_segment(skip);
2259 
2260  /* decrement src as it is equal to ptr after the
2261  * copy_data_segment macro and we might want to
2262  * copy the current value of x later on */
2263  src--;
2264  }
2265 
2266  if (x < RST0 || x > RST7) {
2267  copy_data_segment(1);
2268  if (x)
2269  break;
2270  }
2271  }
2272  }
2273  if (src < ptr)
2274  copy_data_segment(0);
2275  }
2276  #undef copy_data_segment
2277 
2278  *unescaped_buf_ptr = s->buffer;
2279  *unescaped_buf_size = dst - s->buffer;
2280  memset(s->buffer + *unescaped_buf_size, 0,
2281  s->buffer_size - *unescaped_buf_size);
2282 
2283  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2284  (buf_end - *buf_ptr) - (dst - s->buffer));
2285  } else if (start_code == SOS && s->ls) {
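 /* JPEG-LS marker escaping differs from baseline JPEG: after a 0xFF byte only the low 7 bits of the following byte are payload (its top bit must be 0) */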
2286  const uint8_t *src = *buf_ptr;
2287  uint8_t *dst = s->buffer;
2288  int bit_count = 0;
2289  int t = 0, b = 0;
2290  PutBitContext pb;
2291 
2292  /* find marker */
2293  while (src + t < buf_end) {
2294  uint8_t x = src[t++];
2295  if (x == 0xff) {
2296  while ((src + t < buf_end) && x == 0xff)
2297  x = src[t++];
2298  if (x & 0x80) {
2299  t -= FFMIN(2, t);
2300  break;
2301  }
2302  }
2303  }
2304  bit_count = t * 8;
2305  init_put_bits(&pb, dst, t);
2306 
2307  /* unescape bitstream */
2308  while (b < t) {
2309  uint8_t x = src[b++];
2310  put_bits(&pb, 8, x);
2311  if (x == 0xFF && b < t) {
2312  x = src[b++];
2313  if (x & 0x80) {
2314  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2315  x &= 0x7f;
2316  }
2317  put_bits(&pb, 7, x);
2318  bit_count--;
2319  }
2320  }
2321  flush_put_bits(&pb);
2322 
2323  *unescaped_buf_ptr = dst;
2324  *unescaped_buf_size = (bit_count + 7) >> 3;
2325  memset(s->buffer + *unescaped_buf_size, 0,
2326  s->buffer_size - *unescaped_buf_size);
2327  } else {
2328  *unescaped_buf_ptr = *buf_ptr;
2329  *unescaped_buf_size = buf_end - *buf_ptr;
2330  }
2331 
2332  return start_code;
2333 }
2334 
2335 static void reset_icc_profile(MJpegDecodeContext *s)
2336 {
2337  int i;
2338 
2339  if (s->iccentries) {
2340  for (i = 0; i < s->iccnum; i++)
2341  av_freep(&s->iccentries[i].data);
2342  av_freep(&s->iccentries);
2343  }
2344 
2345  s->iccread = 0;
2346  s->iccnum = 0;
2347 }
2348 
2349 // SMV JPEG just stacks several output frames into one JPEG picture
2350 // we handle that by setting up the cropping parameters appropriately
2351 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2352 {
2353  MJpegDecodeContext *s = avctx->priv_data;
2354  int ret;
2355 
2356  if (s->smv_next_frame > 0) {
2357  av_assert0(s->smv_frame->buf[0]);
2358  av_frame_unref(frame);
2359  ret = av_frame_ref(frame, s->smv_frame);
2360  if (ret < 0)
2361  return ret;
2362  } else {
2363  av_assert0(frame->buf[0]);
2364  av_frame_unref(s->smv_frame);
2365  ret = av_frame_ref(s->smv_frame, frame);
2366  if (ret < 0)
2367  return ret;
2368  }
2369 
2370  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2371 
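 /* e.g. with smv_frames_per_jpeg = 4 and avctx->height = 120 (coded_height = 480), sub-frame n is exposed via crop_top = n * 120 and crop_bottom = 480 - (n + 1) * 120 (illustrative numbers) */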
2372  frame->width = avctx->coded_width;
2373  frame->height = avctx->coded_height;
2374  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2375  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2376 
2377  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2378 
2379  if (s->smv_next_frame == 0)
2380  av_frame_unref(s->smv_frame);
2381 
2382  return 0;
2383 }
2384 
2385 static int mjpeg_get_packet(AVCodecContext *avctx)
2386 {
2387  MJpegDecodeContext *s = avctx->priv_data;
2388  int ret;
2389 
2390  av_packet_unref(s->pkt);
2391  ret = ff_decode_get_packet(avctx, s->pkt);
2392  if (ret < 0)
2393  return ret;
2394 
2395 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2396  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2397  avctx->codec_id == AV_CODEC_ID_AMV) {
2398  ret = ff_sp5x_process_packet(avctx, s->pkt);
2399  if (ret < 0)
2400  return ret;
2401  }
2402 #endif
2403 
2404  s->buf_size = s->pkt->size;
2405 
2406  return 0;
2407 }
2408 
2409 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2410 {
2411  MJpegDecodeContext *s = avctx->priv_data;
2412  const uint8_t *buf_end, *buf_ptr;
2413  const uint8_t *unescaped_buf_ptr;
2414  int hshift, vshift;
2415  int unescaped_buf_size;
2416  int start_code;
2417  int i, index;
2418  int ret = 0;
2419  int is16bit;
2420  AVDictionaryEntry *e = NULL;
2421 
2422  s->force_pal8 = 0;
2423 
2424  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2425  return smv_process_frame(avctx, frame);
2426 
2427  av_dict_free(&s->exif_metadata);
2428  av_freep(&s->stereo3d);
2429  s->adobe_transform = -1;
2430 
2431  if (s->iccnum != 0)
2432  reset_icc_profile(s);
2433 
2434  ret = mjpeg_get_packet(avctx);
2435  if (ret < 0)
2436  return ret;
2437 redo_for_pal8:
2438  buf_ptr = s->pkt->data;
2439  buf_end = s->pkt->data + s->pkt->size;
2440  while (buf_ptr < buf_end) {
2441  /* find start next marker */
2442  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2443  &unescaped_buf_ptr,
2444  &unescaped_buf_size);
2445  /* EOF */
2446  if (start_code < 0) {
2447  break;
2448  } else if (unescaped_buf_size > INT_MAX / 8) {
2449  av_log(avctx, AV_LOG_ERROR,
2450  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2451  start_code, unescaped_buf_size, s->pkt->size);
2452  return AVERROR_INVALIDDATA;
2453  }
2454  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2455  start_code, buf_end - buf_ptr);
2456 
2457  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2458 
2459  if (ret < 0) {
2460  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2461  goto fail;
2462  }
2463 
2464  s->start_code = start_code;
2465  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2466  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2467 
2468  /* process markers */
2469  if (start_code >= RST0 && start_code <= RST7) {
2470  av_log(avctx, AV_LOG_DEBUG,
2471  "restart marker: %d\n", start_code & 0x0f);
2472  /* APP fields */
2473  } else if (start_code >= APP0 && start_code <= APP15) {
2474  if ((ret = mjpeg_decode_app(s)) < 0)
2475  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2476  av_err2str(ret));
2477  /* Comment */
2478  } else if (start_code == COM) {
2479  ret = mjpeg_decode_com(s);
2480  if (ret < 0)
2481  return ret;
2482  } else if (start_code == DQT) {
2483  ret = ff_mjpeg_decode_dqt(s);
2484  if (ret < 0)
2485  return ret;
2486  }
2487 
2488  ret = -1;
2489 
2490  if (!CONFIG_JPEGLS_DECODER &&
2491  (start_code == SOF48 || start_code == LSE)) {
2492  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2493  return AVERROR(ENOSYS);
2494  }
2495 
2496  if (avctx->skip_frame == AVDISCARD_ALL) {
2497  switch(start_code) {
2498  case SOF0:
2499  case SOF1:
2500  case SOF2:
2501  case SOF3:
2502  case SOF48:
2503  case SOI:
2504  case SOS:
2505  case EOI:
2506  break;
2507  default:
2508  goto skip;
2509  }
2510  }
2511 
2512  switch (start_code) {
2513  case SOI:
2514  s->restart_interval = 0;
2515  s->restart_count = 0;
2516  s->raw_image_buffer = buf_ptr;
2517  s->raw_image_buffer_size = buf_end - buf_ptr;
2518  /* nothing to do on SOI */
2519  break;
2520  case DHT:
2521  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2522  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2523  goto fail;
2524  }
2525  break;
2526  case SOF0:
2527  case SOF1:
2528  if (start_code == SOF0)
2529  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2530  else
2531  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2532  s->lossless = 0;
2533  s->ls = 0;
2534  s->progressive = 0;
2535  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2536  goto fail;
2537  break;
2538  case SOF2:
2539  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2540  s->lossless = 0;
2541  s->ls = 0;
2542  s->progressive = 1;
2543  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2544  goto fail;
2545  break;
2546  case SOF3:
2547  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2548  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2549  s->lossless = 1;
2550  s->ls = 0;
2551  s->progressive = 0;
2552  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2553  goto fail;
2554  break;
2555  case SOF48:
2556  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2557  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2558  s->lossless = 1;
2559  s->ls = 1;
2560  s->progressive = 0;
2561  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2562  goto fail;
2563  break;
2564  case LSE:
2565  if (!CONFIG_JPEGLS_DECODER ||
2566  (ret = ff_jpegls_decode_lse(s)) < 0)
2567  goto fail;
2568  if (ret == 1)
2569  goto redo_for_pal8;
2570  break;
2571  case EOI:
2572 eoi_parser:
2573  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2574  s->progressive && s->cur_scan && s->got_picture)
2575  mjpeg_idct_scan_progressive_ac(s);
2576  s->cur_scan = 0;
2577  if (!s->got_picture) {
2578  av_log(avctx, AV_LOG_WARNING,
2579  "Found EOI before any SOF, ignoring\n");
2580  break;
2581  }
2582  if (s->interlaced) {
2583  s->bottom_field ^= 1;
2584  /* if not bottom field, do not output image yet */
2585  if (s->bottom_field == !s->interlace_polarity)
2586  break;
2587  }
2588  if (avctx->skip_frame == AVDISCARD_ALL) {
2589  s->got_picture = 0;
2590  ret = AVERROR(EAGAIN);
2591  goto the_end_no_picture;
2592  }
2593  if (s->avctx->hwaccel) {
2594  ret = s->avctx->hwaccel->end_frame(s->avctx);
2595  if (ret < 0)
2596  return ret;
2597 
2598  av_freep(&s->hwaccel_picture_private);
2599  }
2600  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2601  return ret;
2602  s->got_picture = 0;
2603 
2604  frame->pkt_dts = s->pkt->dts;
2605 
2606  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2607  int qp = FFMAX3(s->qscale[0],
2608  s->qscale[1],
2609  s->qscale[2]);
2610 
2611  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2612  }
2613 
2614  goto the_end;
2615  case SOS:
2616  s->raw_scan_buffer = buf_ptr;
2617  s->raw_scan_buffer_size = buf_end - buf_ptr;
2618 
2619  s->cur_scan++;
2620  if (avctx->skip_frame == AVDISCARD_ALL) {
2621  skip_bits(&s->gb, get_bits_left(&s->gb));
2622  break;
2623  }
2624 
2625  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2626  (avctx->err_recognition & AV_EF_EXPLODE))
2627  goto fail;
2628  break;
2629  case DRI:
2630  if ((ret = mjpeg_decode_dri(s)) < 0)
2631  return ret;
2632  break;
2633  case SOF5:
2634  case SOF6:
2635  case SOF7:
2636  case SOF9:
2637  case SOF10:
2638  case SOF11:
2639  case SOF13:
2640  case SOF14:
2641  case SOF15:
2642  case JPG:
2643  av_log(avctx, AV_LOG_ERROR,
2644  "mjpeg: unsupported coding type (%x)\n", start_code);
2645  break;
2646  }
2647 
2648 skip:
2649  /* eof process start code */
2650  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2651  av_log(avctx, AV_LOG_DEBUG,
2652  "marker parser used %d bytes (%d bits)\n",
2653  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2654  }
2655  if (s->got_picture && s->cur_scan) {
2656  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2657  goto eoi_parser;
2658  }
2659  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2660  return AVERROR_INVALIDDATA;
2661 fail:
2662  s->got_picture = 0;
2663  return ret;
2664 the_end:
2665 
2666  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2667 
2668  if (AV_RB32(s->upscale_h)) {
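 /* some chroma-subsampling combinations are decoded at reduced horizontal resolution; interpolate the marked planes back up to full width in place (factor 2 or 3 depending on upscale_h[p]) */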
2669  int p;
2670  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2671  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2672  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2673  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2674  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2675  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2676  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2677  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2678  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2679  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2680  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2681  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2682  );
2683  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2684  if (ret)
2685  return ret;
2686 
2687  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2688  for (p = 0; p<s->nb_components; p++) {
2689  uint8_t *line = s->picture_ptr->data[p];
2690  int w = s->width;
2691  int h = s->height;
2692  if (!s->upscale_h[p])
2693  continue;
2694  if (p==1 || p==2) {
2695  w = AV_CEIL_RSHIFT(w, hshift);
2696  h = AV_CEIL_RSHIFT(h, vshift);
2697  }
2698  if (s->upscale_v[p] == 1)
2699  h = (h+1)>>1;
2700  av_assert0(w > 0);
2701  for (i = 0; i < h; i++) {
2702  if (s->upscale_h[p] == 1) {
2703  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2704  else line[w - 1] = line[(w - 1) / 2];
2705  for (index = w - 2; index > 0; index--) {
2706  if (is16bit)
2707  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2708  else
2709  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2710  }
2711  } else if (s->upscale_h[p] == 2) {
2712  if (is16bit) {
2713  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2714  if (w > 1)
2715  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2716  } else {
2717  line[w - 1] = line[(w - 1) / 3];
2718  if (w > 1)
2719  line[w - 2] = line[w - 1];
2720  }
2721  for (index = w - 3; index > 0; index--) {
2722  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2723  }
2724  }
2725  line += s->linesize[p];
2726  }
2727  }
2728  }
2729  if (AV_RB32(s->upscale_v)) {
2730  int p;
2731  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2732  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2733  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2734  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2735  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2736  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2737  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2738  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2739  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2740  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2741  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2742  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2743  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2744  );
2745  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2746  if (ret)
2747  return ret;
2748 
2749  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2750  for (p = 0; p < s->nb_components; p++) {
2751  uint8_t *dst;
2752  int w = s->width;
2753  int h = s->height;
2754  if (!s->upscale_v[p])
2755  continue;
2756  if (p==1 || p==2) {
2757  w = AV_CEIL_RSHIFT(w, hshift);
2758  h = AV_CEIL_RSHIFT(h, vshift);
2759  }
2760  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2761  for (i = h - 1; i; i--) {
2762  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2763  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2764  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2765  memcpy(dst, src1, w);
2766  } else {
2767  for (index = 0; index < w; index++)
2768  dst[index] = (src1[index] + src2[index]) >> 1;
2769  }
2770  dst -= s->linesize[p];
2771  }
2772  }
2773  }
2774  if (s->flipped && !s->rgb) {
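 /* the COM parser flags some encoders' output (Intel JPEG Library, Metasoft MJPEG) as stored bottom-up; flip it here by pointing each plane at its last row and negating the stride */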
2775  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2776  if (ret)
2777  return ret;
2778 
2779  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2780  for (index=0; index<s->nb_components; index++) {
2781  int h = frame->height;
2782  if (index && index < 3)
2783  h = AV_CEIL_RSHIFT(h, vshift);
2784  if (frame->data[index]) {
2785  frame->data[index] += (h - 1) * frame->linesize[index];
2786  frame->linesize[index] *= -1;
2787  }
2788  }
2789  }
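 /* Adobe APP14 transform 0 with four components (decoded as GBRAP): scale each colour sample by the 4th ("K") plane, rotate the planes into G/B/R order and force the alpha plane to opaque */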
2790  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2791  int w = s->picture_ptr->width;
2792  int h = s->picture_ptr->height;
2793  av_assert0(s->nb_components == 4);
2794  for (i=0; i<h; i++) {
2795  int j;
2796  uint8_t *dst[4];
2797  for (index=0; index<4; index++) {
2798  dst[index] = s->picture_ptr->data[index]
2799  + s->picture_ptr->linesize[index]*i;
2800  }
2801  for (j=0; j<w; j++) {
2802  int k = dst[3][j];
2803  int r = dst[0][j] * k;
2804  int g = dst[1][j] * k;
2805  int b = dst[2][j] * k;
2806  dst[0][j] = g*257 >> 16;
2807  dst[1][j] = b*257 >> 16;
2808  dst[2][j] = r*257 >> 16;
2809  dst[3][j] = 255;
2810  }
2811  }
2812  }
2813  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2814  int w = s->picture_ptr->width;
2815  int h = s->picture_ptr->height;
2816  av_assert0(s->nb_components == 4);
2817  for (i=0; i<h; i++) {
2818  int j;
2819  uint8_t *dst[4];
2820  for (index=0; index<4; index++) {
2821  dst[index] = s->picture_ptr->data[index]
2822  + s->picture_ptr->linesize[index]*i;
2823  }
2824  for (j=0; j<w; j++) {
2825  int k = dst[3][j];
2826  int r = (255 - dst[0][j]) * k;
2827  int g = (128 - dst[1][j]) * k;
2828  int b = (128 - dst[2][j]) * k;
2829  dst[0][j] = r*257 >> 16;
2830  dst[1][j] = (g*257 >> 16) + 128;
2831  dst[2][j] = (b*257 >> 16) + 128;
2832  dst[3][j] = 255;
2833  }
2834  }
2835  }
2836 
2837  if (s->stereo3d) {
2838  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2839  if (stereo) {
2840  stereo->type = s->stereo3d->type;
2841  stereo->flags = s->stereo3d->flags;
2842  }
2843  av_freep(&s->stereo3d);
2844  }
2845 
2846  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2847  AVFrameSideData *sd;
2848  size_t offset = 0;
2849  int total_size = 0;
2850  int i;
2851 
2852  /* Sum size of all parts. */
2853  for (i = 0; i < s->iccnum; i++)
2854  total_size += s->iccentries[i].length;
2855 
2856  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2857  if (!sd) {
2858  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2859  return AVERROR(ENOMEM);
2860  }
2861 
2862  /* Reassemble the parts, which are now in-order. */
2863  for (i = 0; i < s->iccnum; i++) {
2864  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2865  offset += s->iccentries[i].length;
2866  }
2867  }
2868 
2869  if (e = av_dict_get(s->exif_metadata, "Orientation", e, AV_DICT_IGNORE_SUFFIX)) {
2870  char *value = e->value + strspn(e->value, " \n\t\r"), *endptr;
2871  int orientation = strtol(value, &endptr, 0);
2872 
2873  if (!*endptr) {
2874  AVFrameSideData *sd = NULL;
2875 
2876  if (orientation >= 2 && orientation <= 8) {
2877  int32_t *matrix;
2878 
2879  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
2880  if (!sd) {
2881  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2882  return AVERROR(ENOMEM);
2883  }
2884 
2885  matrix = (int32_t *)sd->data;
2886 
2887  switch (orientation) {
2888  case 2:
2891  break;
2892  case 3:
2894  break;
2895  case 4:
2898  break;
2899  case 5:
2902  break;
2903  case 6:
2905  break;
2906  case 7:
2909  break;
2910  case 8:
2912  break;
2913  default:
2914  av_assert0(0);
2915  }
2916  }
2917  }
2918  }
2919 
2920  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2921  av_dict_free(&s->exif_metadata);
2922 
2923  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2924  ret = smv_process_frame(avctx, frame);
2925  if (ret < 0) {
2926  av_frame_unref(frame);
2927  return ret;
2928  }
2929  }
2930  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2931  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2932  avctx->coded_height > s->orig_height) {
2933  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2934  frame->crop_top = frame->height - avctx->height;
2935  }
2936 
2937  ret = 0;
2938 
2939 the_end_no_picture:
2940  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2941  buf_end - buf_ptr);
2942 
2943  return ret;
2944 }
2945 
2946 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2947  * even without having called ff_mjpeg_decode_init(). */
2949 {
2950  MJpegDecodeContext *s = avctx->priv_data;
2951  int i, j;
2952 
2953  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2954  av_log(avctx, AV_LOG_INFO, "Single field\n");
2955  }
2956 
2957  if (s->picture) {
2958  av_frame_free(&s->picture);
2959  s->picture_ptr = NULL;
2960  } else if (s->picture_ptr)
2961  av_frame_unref(s->picture_ptr);
2962 
2963  av_frame_free(&s->smv_frame);
2964 
2965  av_freep(&s->buffer);
2966  av_freep(&s->stereo3d);
2967  av_freep(&s->ljpeg_buffer);
2968  s->ljpeg_buffer_size = 0;
2969 
2970  for (i = 0; i < 3; i++) {
2971  for (j = 0; j < 4; j++)
2972  ff_free_vlc(&s->vlcs[i][j]);
2973  }
2974  for (i = 0; i < MAX_COMPONENTS; i++) {
2975  av_freep(&s->blocks[i]);
2976  av_freep(&s->last_nnz[i]);
2977  }
2978  av_dict_free(&s->exif_metadata);
2979 
2981 
2982  av_freep(&s->hwaccel_picture_private);
2983  av_freep(&s->jls_state);
2984 
2985  return 0;
2986 }
2987 
2988 static void decode_flush(AVCodecContext *avctx)
2989 {
2990  MJpegDecodeContext *s = avctx->priv_data;
2991  s->got_picture = 0;
2992 
2993  s->smv_next_frame = 0;
2994  av_frame_unref(s->smv_frame);
2995 }
2996 
2997 #if CONFIG_MJPEG_DECODER
2998 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2999 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
3000 static const AVOption options[] = {
3001  { "extern_huff", "Use external huffman table.",
3002  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
3003  { NULL },
3004 };
3005 
3006 static const AVClass mjpegdec_class = {
3007  .class_name = "MJPEG decoder",
3008  .item_name = av_default_item_name,
3009  .option = options,
3010  .version = LIBAVUTIL_VERSION_INT,
3011 };
3012 
3013 const FFCodec ff_mjpeg_decoder = {
3014  .p.name = "mjpeg",
3015  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
3016  .p.type = AVMEDIA_TYPE_VIDEO,
3017  .p.id = AV_CODEC_ID_MJPEG,
3018  .priv_data_size = sizeof(MJpegDecodeContext),
3020  .close = ff_mjpeg_decode_end,
3022  .flush = decode_flush,
3023  .p.capabilities = AV_CODEC_CAP_DR1,
3024  .p.max_lowres = 3,
3025  .p.priv_class = &mjpegdec_class,
3026  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
3027  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3031  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3032 #if CONFIG_MJPEG_NVDEC_HWACCEL
3033  HWACCEL_NVDEC(mjpeg),
3034 #endif
3035 #if CONFIG_MJPEG_VAAPI_HWACCEL
3036  HWACCEL_VAAPI(mjpeg),
3037 #endif
3038  NULL
3039  },
3040 };
3041 #endif
3042 #if CONFIG_THP_DECODER
3043 const FFCodec ff_thp_decoder = {
3044  .p.name = "thp",
3045  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
3046  .p.type = AVMEDIA_TYPE_VIDEO,
3047  .p.id = AV_CODEC_ID_THP,
3048  .priv_data_size = sizeof(MJpegDecodeContext),
3050  .close = ff_mjpeg_decode_end,
3052  .flush = decode_flush,
3053  .p.capabilities = AV_CODEC_CAP_DR1,
3054  .p.max_lowres = 3,
3055  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3057 };
3058 #endif
3059 
3060 #if CONFIG_SMVJPEG_DECODER
3061 const FFCodec ff_smvjpeg_decoder = {
3062  .p.name = "smvjpeg",
3063  CODEC_LONG_NAME("SMV JPEG"),
3064  .p.type = AVMEDIA_TYPE_VIDEO,
3065  .p.id = AV_CODEC_ID_SMVJPEG,
3066  .priv_data_size = sizeof(MJpegDecodeContext),
3068  .close = ff_mjpeg_decode_end,
3070  .flush = decode_flush,
3071  .p.capabilities = AV_CODEC_CAP_DR1,
3072  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3074 };
3075 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1369
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:205
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:975
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1147
out
FILE * out
Definition: movenc.c:54
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1401
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:679
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2988
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2858
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:957
SOF0
@ SOF0
Definition: mjpeg.h:39
matrix
Definition: vc1dsp.c:42
src1
const pixel * src1
Definition: h264pred_template.c:421
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1351
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:696
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:273
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:113
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:198
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:221
AVFrame::width
int width
Definition: frame.h:397
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:501
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1654
smv_process_frame
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2351
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:66
internal.h
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1004
AVOption
AVOption.
Definition: opt.h:251
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:791
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:146
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:119
mjpeg_get_packet
static int mjpeg_get_packet(AVCodecContext *avctx)
Definition: mjpegdec.c:2385
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1328
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2898
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:593
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
rgb
Definition: rpzaenc.c:59
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1247
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1417
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:123
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:407
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:122
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1698
fail
#define fail()
Definition: checkasm.h:134
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:503
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1650
GetBitContext
Definition: get_bits.h:61
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2150
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:59
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:478
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2886
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:39
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:586
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:443
ff_sp5x_process_packet
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: sp5xdec.c:35
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
av_bswap32
#define av_bswap32
Definition: bswap.h:33
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:276
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:243
aligned
static int aligned(int val)
Definition: dashdec.c:168
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:858
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:471
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1848
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1652
mask
static const uint16_t mask[17]
Definition: lzw.c:38
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:60
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1040
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:500
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:102
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:472
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1653
g
const char * g
Definition: vf_curves.c:127
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:376
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:354
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:363
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:159
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:470
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2335
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2948
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:264
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:408
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:182
if
if(ret)
Definition: filter_design.txt:179
ff_mjpeg_receive_frame
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2409
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:478
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:449
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_CODEC_ID_SP5X
@ AV_CODEC_ID_SP5X
Definition: codec_id.h:62
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:110
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:450
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1608
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:198
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:433
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
tiff_common.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MJpegDecodeContext
Definition: mjpegdec.h:54
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1432
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:975
lowres
static int lowres
Definition: ffplay.c:335
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1553
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1458
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:507
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1450
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1075
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
dc
Intra DC prediction term, referenced from the Snow codec format description in doc/snow.txt.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
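As a brief illustration of how this reference API is typically used, here is a minimal sketch built only from public libavutil calls; the function and variable names are invented for the example.

#include <libavutil/frame.h>

/* Make 'dst' refer to the same data as 'src' without copying the pixels. */
static int share_frame(AVFrame *dst, const AVFrame *src)
{
    av_frame_unref(dst);           /* dst must not hold an old reference */
    return av_frame_ref(dst, src); /* 0 on success, negative AVERROR on failure */
}

The buffers are reference-counted, so both frames stay valid until each side calls av_frame_unref() (or av_frame_free()) on its own AVFrame.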
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:50
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:876
av_err2str
#define av_err2str(errnum)
Convenience macro; the return value should be used only directly in function arguments, never stand-alone.
Definition: error.h:121
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1643
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:267
AV_RB32
Read a 32-bit big-endian value from memory.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
Format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
Definition: frame.h:412
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
VD
#define VD
Definition: av1dec.c:1234
offset
Referenced from the filter-writing tutorial in doc/writing_filters.txt.
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2190
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:164
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:218
layout
For each input and each output, the list of supported formats; for video that means pixel format, for audio that means channel layout.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:169
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2043
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:809
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
Example code, referenced from the activate() discussion in doc/filter_design.txt.
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:499
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1651
src2
const pixel * src2
Definition: h264pred_template.c:422
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1821
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE bytes at the end, which will always be 0.
Definition: utils.c:49
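A minimal sketch of the usual decoder pattern for this helper, assuming the documented behaviour that the buffer is freed on allocation failure; the function and variable names are invented for the example.

#include <libavcodec/avcodec.h>   /* av_fast_padded_malloc() */
#include <libavutil/error.h>

/* Grow *buf to at least 'needed' bytes, reusing the old allocation when it
 * is already large enough; the trailing padding bytes are zeroed. */
static int ensure_padded(uint8_t **buf, unsigned int *size, size_t needed)
{
    av_fast_padded_malloc(buf, size, needed);
    return *buf ? 0 : AVERROR(ENOMEM);   /* NULL signals the allocation failed */
}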
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
Default option value, referenced from the filter-writing tutorial in doc/writing_filters.txt.
Definition: writing_filters.txt:86
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1335
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:487
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:264
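For illustration, a small sketch of allocating and releasing a zeroed block with the libavutil memory helpers; the context struct and function names are invented for the example.

#include <libavutil/mem.h>

struct work_ctx { int width, height; };   /* hypothetical context */

static struct work_ctx *work_ctx_new(void)
{
    /* All fields start at zero thanks to av_mallocz(). */
    return av_mallocz(sizeof(struct work_ctx));
}

static void work_ctx_free(struct work_ctx **ctx)
{
    av_freep(ctx);   /* frees *ctx and sets the pointer to NULL */
}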
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:989
len
int len
Definition: vorbis_enc_data.h:426
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: codec_internal.h:49
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:571
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:82
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:949
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:29
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:71
frame
Referenced from the request_frame discussion in doc/filter_design.txt.
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:177
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1332
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Left-neighbour prediction term, referenced from the Snow codec format description in doc/snow.txt.
Definition: snow.txt:386
AV_RL32
Read a 32-bit little-endian value from memory.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2215
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
id
enum AVCodecID id
Definition: dts2pts_bsf.c:362
OFFSET
offset is the offset of the option's field in the private context structure; see the OFFSET() macro in doc/writing_filters.txt.
AVCodecContext
main external API structure.
Definition: avcodec.h:398
AVFrame::height
int height
Definition: frame.h:397
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:304
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
buffer
The frame and frame reference mechanism is intended to avoid, as much as possible, expensive copies of frame data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:683
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
Return values, referenced from the request_frame discussion in doc/filter_design.txt.
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the IFD in *ifd_offset.
Definition: tiff_common.c:228
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1327
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:302
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:586
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1833
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1046
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
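A sketch of how a decoder might attach stereo 3D side data to an output frame with this helper; the function name and the 'inverted' parameter are invented for the example, while the constants are the public ones from stereo3d.h that also appear in this listing.

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>

static int tag_top_bottom(AVFrame *frame, int inverted)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM;         /* views stacked vertically */
    if (inverted)
        stereo->flags |= AV_STEREO3D_FLAG_INVERT; /* bottom view is the left one */
    return 0;
}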
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:423
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:565
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
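A minimal sketch pairing av_dict_copy() with av_dict_free() from the entry above; the function and dictionary names are invented for the example.

#include <libavutil/dict.h>

static int clone_metadata(AVDictionary **dst, const AVDictionary *src)
{
    int ret;

    av_dict_free(dst);               /* drop any previous contents */
    ret = av_dict_copy(dst, src, 0); /* 0 = default flags, keys and values duplicated */
    if (ret < 0)
        av_dict_free(dst);           /* leave *dst empty on failure */
    return ret;
}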
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: codec_par.h:42
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:173
AVDictionaryEntry::value
char * value
Definition: dict.h:91
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:318
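A short sketch of the typical validation call made before configuring decoder dimensions; the wrapper function is invented for the example, and a negative return simply means the width/height pair cannot be addressed safely.

#include <libavutil/imgutils.h>
#include <libavutil/log.h>

static int check_dimensions(void *log_ctx, unsigned w, unsigned h)
{
    int ret = av_image_check_size(w, h, 0, log_ctx);  /* 0 = no extra log offset */
    if (ret < 0)
        av_log(log_ctx, AV_LOG_ERROR, "invalid dimensions %ux%u\n", w, h);
    return ret;
}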
AV_RB24
Read a 24-bit big-endian value from memory.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:354
re
float re
Definition: fft.c:79