1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/display.h"
36 #include "libavutil/emms.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/avassert.h"
39 #include "libavutil/mem.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "blockdsp.h"
43 #include "codec_internal.h"
44 #include "copy_block.h"
45 #include "decode.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 #include "exif_internal.h"
57 #include "bytestream.h"
58 
59 
60 static int init_default_huffman_tables(MJpegDecodeContext *s)
61 {
62  static const struct {
63  int class;
64  int index;
65  const uint8_t *bits;
66  const uint8_t *values;
67  int length;
68  } ht[] = {
69  { 0, 0, ff_mjpeg_bits_dc_luminance,
70  ff_mjpeg_val_dc, 12 },
71  { 0, 1, ff_mjpeg_bits_dc_chrominance,
72  ff_mjpeg_val_dc, 12 },
73  { 1, 0, ff_mjpeg_bits_ac_luminance,
74  ff_mjpeg_val_ac_luminance, 162 },
75  { 1, 1, ff_mjpeg_bits_ac_chrominance,
76  ff_mjpeg_val_ac_chrominance, 162 },
77  { 2, 0, ff_mjpeg_bits_ac_luminance,
78  ff_mjpeg_val_ac_luminance, 162 },
79  { 2, 1, ff_mjpeg_bits_ac_chrominance,
80  ff_mjpeg_val_ac_chrominance, 162 },
81  };
82  int i, ret;
83 
84  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
85  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
86  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
87  ht[i].bits, ht[i].values,
88  ht[i].class == 1, s->avctx);
89  if (ret < 0)
90  return ret;
91 
92  if (ht[i].class < 2) {
93  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
94  ht[i].bits + 1, 16);
95  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
96  ht[i].values, ht[i].length);
97  }
98  }
99 
100  return 0;
101 }
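/* Note: the tables installed above are the "typical" Huffman tables from
 * Annex K.3 of ITU-T T.81, so streams that omit DHT segments (common in
 * camera MJPEG) can still be decoded.  Classes 0/1 feed the baseline
 * DC/AC decoders; class 2 holds plain (un-biased) copies of the AC tables
 * for the progressive code path, which extracts run/size itself. */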
102 
103 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
104 {
105  s->buggy_avid = 1;
106  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
107  s->interlace_polarity = 1;
108  if (len > 14 && buf[12] == 2) /* 2 - PAL */
109  s->interlace_polarity = 0;
110  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
111  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
112 }
113 
114 static void init_idct(AVCodecContext *avctx)
115 {
116  MJpegDecodeContext *s = avctx->priv_data;
117 
118  ff_idctdsp_init(&s->idsp, avctx);
119  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
120  s->idsp.idct_permutation);
121 }
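/* The zig-zag scan order is permuted here to match the coefficient layout
 * of the IDCT selected by ff_idctdsp_init(), so decode_block() can store
 * dequantized coefficients directly in the order the IDCT expects. */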
122 
123 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
124 {
125  MJpegDecodeContext *s = avctx->priv_data;
126  int ret;
127 
128  if (!s->picture_ptr) {
129  s->picture = av_frame_alloc();
130  if (!s->picture)
131  return AVERROR(ENOMEM);
132  s->picture_ptr = s->picture;
133  }
134 
135  s->avctx = avctx;
136  ff_blockdsp_init(&s->bdsp);
137  ff_hpeldsp_init(&s->hdsp, avctx->flags);
138  init_idct(avctx);
139  s->buffer_size = 0;
140  s->buffer = NULL;
141  s->start_code = -1;
142  s->first_picture = 1;
143  s->got_picture = 0;
144  s->orig_height = avctx->coded_height;
145  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
146  avctx->colorspace = AVCOL_SPC_BT470BG;
147  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
148 
149  if ((ret = init_default_huffman_tables(s)) < 0)
150  return ret;
151 
152  if (s->extern_huff) {
153  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
154  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
155  return ret;
156  if (ff_mjpeg_decode_dht(s)) {
157  av_log(avctx, AV_LOG_ERROR,
158  "error using external huffman table, switching back to internal\n");
159  if ((ret = init_default_huffman_tables(s)) < 0)
160  return ret;
161  }
162  }
163  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
164  s->interlace_polarity = 1; /* bottom field first */
165  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
166  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
167  if (avctx->codec_tag == AV_RL32("MJPG"))
168  s->interlace_polarity = 1;
169  }
170 
171  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
172  if (avctx->extradata_size >= 4)
173  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
174 
175  if (s->smv_frames_per_jpeg <= 0) {
176  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
177  return AVERROR_INVALIDDATA;
178  }
179 
180  s->smv_frame = av_frame_alloc();
181  if (!s->smv_frame)
182  return AVERROR(ENOMEM);
183  } else if (avctx->extradata_size > 8
184  && AV_RL32(avctx->extradata) == 0x2C
185  && AV_RL32(avctx->extradata+4) == 0x18) {
186  parse_avid(s, avctx->extradata, avctx->extradata_size);
187  }
188 
189  if (avctx->codec->id == AV_CODEC_ID_AMV)
190  s->flipped = 1;
191 
192  return 0;
193 }
194 
195 
196 /* quantize tables */
197 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
198 {
199  int len, index, i;
200 
201  len = get_bits(&s->gb, 16) - 2;
202 
203  if (8*len > get_bits_left(&s->gb)) {
204  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
205  return AVERROR_INVALIDDATA;
206  }
207 
208  while (len >= 65) {
209  int pr = get_bits(&s->gb, 4);
210  if (pr > 1) {
211  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
212  return AVERROR_INVALIDDATA;
213  }
214  index = get_bits(&s->gb, 4);
215  if (index >= 4)
216  return -1;
217  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
218  /* read quant table */
219  for (i = 0; i < 64; i++) {
220  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
221  if (s->quant_matrixes[index][i] == 0) {
222  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
223  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
224  if (s->avctx->err_recognition & AV_EF_EXPLODE)
225  return AVERROR_INVALIDDATA;
226  }
227  }
228 
229  // XXX FIXME fine-tune, and perhaps add dc too
230  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
231  s->quant_matrixes[index][8]) >> 1;
232  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
233  index, s->qscale[index]);
234  len -= 1 + 64 * (1+pr);
235  }
236  return 0;
237 }
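/* Illustrative DQT layout (ITU-T T.81 B.2.4.1), matching the loop above:
 *   Lq(16) | Pq(4) Tq(4) | Q_0..Q_63 (8 bits each, 16 if Pq = 1) | ...
 * One 8-bit table therefore occupies 65 bytes, which is why the loop runs
 * while len >= 65.  The qscale[] value derived above is only a rough
 * activity estimate taken from two low-frequency entries; it is not part
 * of the bitstream. */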
238 
239 /* decode huffman tables and build VLC decoders */
240 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
241 {
242  int len, index, i, class, n, v;
243  uint8_t bits_table[17];
244  uint8_t val_table[256];
245  int ret = 0;
246 
247  len = get_bits(&s->gb, 16) - 2;
248 
249  if (8*len > get_bits_left(&s->gb)) {
250  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
251  return AVERROR_INVALIDDATA;
252  }
253 
254  while (len > 0) {
255  if (len < 17)
256  return AVERROR_INVALIDDATA;
257  class = get_bits(&s->gb, 4);
258  if (class >= 2)
259  return AVERROR_INVALIDDATA;
260  index = get_bits(&s->gb, 4);
261  if (index >= 4)
262  return AVERROR_INVALIDDATA;
263  n = 0;
264  for (i = 1; i <= 16; i++) {
265  bits_table[i] = get_bits(&s->gb, 8);
266  n += bits_table[i];
267  }
268  len -= 17;
269  if (len < n || n > 256)
270  return AVERROR_INVALIDDATA;
271 
272  for (i = 0; i < n; i++) {
273  v = get_bits(&s->gb, 8);
274  val_table[i] = v;
275  }
276  len -= n;
277 
278  /* build VLC and flush previous vlc if present */
279  ff_vlc_free(&s->vlcs[class][index]);
280  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
281  class, index, n);
282  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
283  val_table, class > 0, s->avctx)) < 0)
284  return ret;
285 
286  if (class > 0) {
287  ff_vlc_free(&s->vlcs[2][index]);
288  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
289  val_table, 0, s->avctx)) < 0)
290  return ret;
291  }
292 
293  for (i = 0; i < 16; i++)
294  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
295  for (i = 0; i < 256; i++)
296  s->raw_huffman_values[class][index][i] = val_table[i];
297  }
298  return 0;
299 }
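/* Illustrative DHT layout (ITU-T T.81 B.2.4.2), matching the loop above:
 *   Lh(16) | Tc(4) Th(4) | L_1..L_16 (code-length counts) | V_1..V_n
 * with n = sum(L_i) <= 256.  Tc selects DC (0) or AC (1) and Th the table
 * slot; the raw lengths/values are kept so they can be handed through to
 * hardware decoders. */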
300 
301 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
302 {
303  int len, nb_components, i, width, height, bits, ret, size_change;
304  unsigned pix_fmt_id;
305  int h_count[MAX_COMPONENTS] = { 0 };
306  int v_count[MAX_COMPONENTS] = { 0 };
307 
308  s->cur_scan = 0;
309  memset(s->upscale_h, 0, sizeof(s->upscale_h));
310  memset(s->upscale_v, 0, sizeof(s->upscale_v));
311 
312  len = get_bits(&s->gb, 16);
313  bits = get_bits(&s->gb, 8);
314 
315  if (bits > 16 || bits < 1) {
316  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
317  return AVERROR_INVALIDDATA;
318  }
319 
320  if (s->avctx->bits_per_raw_sample != bits) {
321  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
322  s->avctx->bits_per_raw_sample = bits;
323  init_idct(s->avctx);
324  }
325  if (s->pegasus_rct)
326  bits = 9;
327  if (bits == 9 && !s->pegasus_rct)
328  s->rct = 1; // FIXME ugly
329 
330  if(s->lossless && s->avctx->lowres){
331  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
332  return -1;
333  }
334 
335  height = get_bits(&s->gb, 16);
336  width = get_bits(&s->gb, 16);
337 
338  // HACK for odd_height.mov
339  if (s->interlaced && s->width == width && s->height == height + 1)
340  height= s->height;
341 
342  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
343  if (av_image_check_size(width, height, 0, s->avctx) < 0)
344  return AVERROR_INVALIDDATA;
345  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
346  return AVERROR_INVALIDDATA;
347 
348  nb_components = get_bits(&s->gb, 8);
349  if (nb_components <= 0 ||
350  nb_components > MAX_COMPONENTS)
351  return -1;
352  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
353  if (nb_components != s->nb_components) {
354  av_log(s->avctx, AV_LOG_ERROR,
355  "nb_components changing in interlaced picture\n");
356  return AVERROR_INVALIDDATA;
357  }
358  }
359  if (s->ls && !(bits <= 8 || nb_components == 1)) {
360  avpriv_report_missing_feature(s->avctx,
361  "JPEG-LS that is not <= 8 "
362  "bits/component or 16-bit gray");
363  return AVERROR_PATCHWELCOME;
364  }
365  if (len != 8 + 3 * nb_components) {
366  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
367  return AVERROR_INVALIDDATA;
368  }
369 
370  s->nb_components = nb_components;
371  s->h_max = 1;
372  s->v_max = 1;
373  for (i = 0; i < nb_components; i++) {
374  /* component id */
375  s->component_id[i] = get_bits(&s->gb, 8);
376  h_count[i] = get_bits(&s->gb, 4);
377  v_count[i] = get_bits(&s->gb, 4);
378  /* compute hmax and vmax (only used in interleaved case) */
379  if (h_count[i] > s->h_max)
380  s->h_max = h_count[i];
381  if (v_count[i] > s->v_max)
382  s->v_max = v_count[i];
383  s->quant_index[i] = get_bits(&s->gb, 8);
384  if (s->quant_index[i] >= 4) {
385  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
386  return AVERROR_INVALIDDATA;
387  }
388  if (!h_count[i] || !v_count[i]) {
389  av_log(s->avctx, AV_LOG_ERROR,
390  "Invalid sampling factor in component %d %d:%d\n",
391  i, h_count[i], v_count[i]);
392  return AVERROR_INVALIDDATA;
393  }
394 
395  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
396  i, h_count[i], v_count[i],
397  s->component_id[i], s->quant_index[i]);
398  }
399  if ( nb_components == 4
400  && s->component_id[0] == 'C'
401  && s->component_id[1] == 'M'
402  && s->component_id[2] == 'Y'
403  && s->component_id[3] == 'K')
404  s->adobe_transform = 0;
405 
406  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
407  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
408  return AVERROR_PATCHWELCOME;
409  }
410 
411  if (s->bayer) {
412  if (nb_components == 2) {
413  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
414  width stored in their SOF3 markers is the width of each one. We only output
415  a single component, therefore we need to adjust the output image width. We
416  handle the deinterleaving (but not the debayering) in this file. */
417  width *= 2;
418  }
419  /* They can also contain 1 component, which is double the width and half the height
420  of the final image (rows are interleaved). We don't handle the decoding in this
421  file, but leave that to the TIFF/DNG decoder. */
422  }
423 
424  /* if different size, realloc/alloc picture */
425  if (width != s->width || height != s->height || bits != s->bits ||
426  memcmp(s->h_count, h_count, sizeof(h_count)) ||
427  memcmp(s->v_count, v_count, sizeof(v_count))) {
428  size_change = 1;
429 
430  s->width = width;
431  s->height = height;
432  s->bits = bits;
433  memcpy(s->h_count, h_count, sizeof(h_count));
434  memcpy(s->v_count, v_count, sizeof(v_count));
435  s->interlaced = 0;
436  s->got_picture = 0;
437 
438  /* test interlaced mode */
439  if (s->first_picture &&
440  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
441  s->orig_height != 0 &&
442  s->height < ((s->orig_height * 3) / 4)) {
443  s->interlaced = 1;
444  s->bottom_field = s->interlace_polarity;
445  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
446  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
447  height *= 2;
448  }
449 
450  ret = ff_set_dimensions(s->avctx, width, height);
451  if (ret < 0)
452  return ret;
453 
454  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
455  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
456  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
457  s->orig_height < height)
458  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
459 
460  s->first_picture = 0;
461  } else {
462  size_change = 0;
463  }
464 
465  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
466  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
467  if (s->avctx->height <= 0)
468  return AVERROR_INVALIDDATA;
469  }
470  if (s->bayer && s->progressive) {
471  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
472  return AVERROR_INVALIDDATA;
473  }
474 
475  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
476  if (s->progressive) {
477  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
478  return AVERROR_INVALIDDATA;
479  }
480  } else {
481  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
482  s->rgb = 1;
483  else if (!s->lossless)
484  s->rgb = 0;
485  /* XXX: not complete test ! */
486  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
487  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
488  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
489  (s->h_count[3] << 4) | s->v_count[3];
490  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
491  /* NOTE we do not allocate pictures large enough for the possible
492  * padding of h/v_count being 4 */
493  if (!(pix_fmt_id & 0xD0D0D0D0))
494  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
495  if (!(pix_fmt_id & 0x0D0D0D0D))
496  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
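/* pix_fmt_id packs each component's horizontal/vertical sampling factors
 * into one nibble pair, component 0 in the top byte: typical 4:2:0 YCbCr
 * gives 0x22111100, 4:2:2 gives 0x21111100, 4:4:4 gives 0x11111100.  The
 * two subtractions above halve the factors whenever every h (or every v)
 * factor is 0 or 2, e.g. a stream coding all components as 2x2
 * (0x22222200) has no real subsampling and folds to 0x11111100. */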
497 
498  for (i = 0; i < 8; i++) {
499  int j = 6 + (i&1) - (i&6);
500  int is = (pix_fmt_id >> (4*i)) & 0xF;
501  int js = (pix_fmt_id >> (4*j)) & 0xF;
502 
503  if (is == 1 && js != 2 && (i < 2 || i > 5))
504  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
505  if (is == 1 && js != 2 && (i < 2 || i > 5))
506  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
507 
508  if (is == 1 && js == 2) {
509  if (i & 1) s->upscale_h[j/2] = 1;
510  else s->upscale_v[j/2] = 1;
511  }
512  }
513 
514  if (s->bayer) {
515  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
516  goto unk_pixfmt;
517  }
518 
519  switch (pix_fmt_id) {
520  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
521  if (!s->bayer)
522  goto unk_pixfmt;
523  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
524  break;
525  case 0x11111100:
526  if (s->rgb)
527  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
528  else {
529  if ( s->adobe_transform == 0
530  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
531  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
532  } else {
533  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
534  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
535  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
536  }
537  }
538  av_assert0(s->nb_components == 3);
539  break;
540  case 0x11111111:
541  if (s->rgb)
542  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
543  else {
544  if (s->adobe_transform == 0 && s->bits <= 8) {
545  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
546  } else {
547  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
548  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
549  }
550  }
551  av_assert0(s->nb_components == 4);
552  break;
553  case 0x11412100:
554  if (s->bits > 8)
555  goto unk_pixfmt;
556  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
557  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
558  s->upscale_h[0] = 4;
559  s->upscale_h[1] = 0;
560  s->upscale_h[2] = 1;
561  } else {
562  goto unk_pixfmt;
563  }
564  break;
565  case 0x22111122:
566  case 0x22111111:
567  if (s->adobe_transform == 0 && s->bits <= 8) {
568  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
569  s->upscale_v[1] = s->upscale_v[2] = 1;
570  s->upscale_h[1] = s->upscale_h[2] = 1;
571  } else if (s->adobe_transform == 2 && s->bits <= 8) {
572  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
573  s->upscale_v[1] = s->upscale_v[2] = 1;
574  s->upscale_h[1] = s->upscale_h[2] = 1;
575  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
576  } else {
577  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
578  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
579  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
580  }
581  av_assert0(s->nb_components == 4);
582  break;
583  case 0x12121100:
584  case 0x22122100:
585  case 0x21211100:
586  case 0x21112100:
587  case 0x22211200:
588  case 0x22221100:
589  case 0x22112200:
590  case 0x11222200:
591  if (s->bits > 8)
592  goto unk_pixfmt;
593  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
594  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
595  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
596  } else {
597  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
598  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
599  }
600  break;
601  case 0x11000000:
602  case 0x13000000:
603  case 0x14000000:
604  case 0x31000000:
605  case 0x33000000:
606  case 0x34000000:
607  case 0x41000000:
608  case 0x43000000:
609  case 0x44000000:
610  if(s->bits <= 8)
611  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
612  else
613  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
614  break;
615  case 0x12111100:
616  case 0x14121200:
617  case 0x14111100:
618  case 0x22211100:
619  case 0x22112100:
620  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
621  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
622  else
623  goto unk_pixfmt;
624  s->upscale_v[1] = s->upscale_v[2] = 1;
625  } else {
626  if (pix_fmt_id == 0x14111100)
627  s->upscale_v[1] = s->upscale_v[2] = 1;
628  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
629  else
630  goto unk_pixfmt;
631  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
632  }
633  break;
634  case 0x21111100:
635  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
636  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
637  else
638  goto unk_pixfmt;
639  s->upscale_h[1] = s->upscale_h[2] = 1;
640  } else {
641  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
642  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
643  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
644  }
645  break;
646  case 0x11311100:
647  if (s->bits > 8)
648  goto unk_pixfmt;
649  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
650  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
651  else
652  goto unk_pixfmt;
653  s->upscale_h[0] = s->upscale_h[2] = 2;
654  break;
655  case 0x31111100:
656  if (s->bits > 8)
657  goto unk_pixfmt;
658  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
659  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
660  s->upscale_h[1] = s->upscale_h[2] = 2;
661  break;
662  case 0x22121100:
663  case 0x22111200:
664  case 0x41211100:
665  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
666  else
667  goto unk_pixfmt;
668  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
669  break;
670  case 0x22111100:
671  case 0x23111100:
672  case 0x42111100:
673  case 0x24111100:
674  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
675  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
676  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
677  if (pix_fmt_id == 0x42111100) {
678  if (s->bits > 8)
679  goto unk_pixfmt;
680  s->upscale_h[1] = s->upscale_h[2] = 1;
681  } else if (pix_fmt_id == 0x24111100) {
682  if (s->bits > 8)
683  goto unk_pixfmt;
684  s->upscale_v[1] = s->upscale_v[2] = 1;
685  } else if (pix_fmt_id == 0x23111100) {
686  if (s->bits > 8)
687  goto unk_pixfmt;
688  s->upscale_v[1] = s->upscale_v[2] = 2;
689  }
690  break;
691  case 0x41111100:
692  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
693  else
694  goto unk_pixfmt;
695  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
696  break;
697  default:
698  unk_pixfmt:
699  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
700  memset(s->upscale_h, 0, sizeof(s->upscale_h));
701  memset(s->upscale_v, 0, sizeof(s->upscale_v));
702  return AVERROR_PATCHWELCOME;
703  }
704  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
705  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
706  return AVERROR_PATCHWELCOME;
707  }
708  if (s->ls) {
709  memset(s->upscale_h, 0, sizeof(s->upscale_h));
710  memset(s->upscale_v, 0, sizeof(s->upscale_v));
711  if (s->nb_components == 3) {
712  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
713  } else if (s->nb_components != 1) {
714  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
715  return AVERROR_PATCHWELCOME;
716  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
717  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
718  else if (s->bits <= 8)
719  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
720  else
721  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
722  }
723 
724  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
725  if (!s->pix_desc) {
726  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
727  return AVERROR_BUG;
728  }
729 
730  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
731  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
732  } else {
733  enum AVPixelFormat pix_fmts[] = {
734 #if CONFIG_MJPEG_NVDEC_HWACCEL
735  AV_PIX_FMT_CUDA,
736 #endif
737 #if CONFIG_MJPEG_VAAPI_HWACCEL
738  AV_PIX_FMT_VAAPI,
739 #endif
740  s->avctx->pix_fmt,
741  AV_PIX_FMT_NONE,
742  };
743  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
744  if (s->hwaccel_pix_fmt < 0)
745  return AVERROR(EINVAL);
746 
747  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
748  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
749  }
750 
751  if (s->avctx->skip_frame == AVDISCARD_ALL) {
752  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
753  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
754  s->got_picture = 1;
755  return 0;
756  }
757 
758  av_frame_unref(s->picture_ptr);
759  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
760  return -1;
761  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
762  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
763  s->got_picture = 1;
764 
765  // Let's clear the palette to avoid leaving uninitialized values in it
766  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
767  memset(s->picture_ptr->data[1], 0, 1024);
768 
769  for (i = 0; i < 4; i++)
770  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
771 
772  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
773  s->width, s->height, s->linesize[0], s->linesize[1],
774  s->interlaced, s->avctx->height);
775 
776  }
777 
778  if ((s->rgb && !s->lossless && !s->ls) ||
779  (!s->rgb && s->ls && s->nb_components > 1) ||
780  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
781  ) {
782  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
783  return AVERROR_PATCHWELCOME;
784  }
785 
786  /* totally blank picture as progressive JPEG will only add details to it */
787  if (s->progressive) {
788  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
789  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
790  for (i = 0; i < s->nb_components; i++) {
791  int size = bw * bh * s->h_count[i] * s->v_count[i];
792  av_freep(&s->blocks[i]);
793  av_freep(&s->last_nnz[i]);
794  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
795  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
796  if (!s->blocks[i] || !s->last_nnz[i])
797  return AVERROR(ENOMEM);
798  s->block_stride[i] = bw * s->h_count[i];
799  }
800  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
801  }
802 
803  if (s->avctx->hwaccel) {
804  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
805  s->hwaccel_picture_private =
806  av_mallocz(hwaccel->frame_priv_data_size);
807  if (!s->hwaccel_picture_private)
808  return AVERROR(ENOMEM);
809 
810  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
811  s->raw_image_buffer_size);
812  if (ret < 0)
813  return ret;
814  }
815 
816  return 0;
817 }
818 
819 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
820 {
821  int code;
822  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
823  if (code < 0 || code > 16) {
824  av_log(s->avctx, AV_LOG_WARNING,
825  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
826  0, dc_index, &s->vlcs[0][dc_index]);
827  return 0xfffff;
828  }
829 
830  if (code)
831  return get_xbits(&s->gb, code);
832  else
833  return 0;
834 }
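/* Worked example of baseline DC coding (ITU-T T.81 F.1.2.1): the VLC
 * returns the magnitude category SSSS and get_xbits() then reads SSSS
 * extra bits with JPEG-style sign extension.  For SSSS = 3, extra bits
 * 110 decode to +6 while 010 decode to 2 - (2^3 - 1) = -5.  The value
 * 0xfffff is out of range for any valid difference and serves as an
 * error marker for the callers. */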
835 
836 /* decode block and dequantize */
837 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
838  int dc_index, int ac_index, uint16_t *quant_matrix)
839 {
840  int code, i, j, level, val;
841 
842  /* DC coef */
843  val = mjpeg_decode_dc(s, dc_index);
844  if (val == 0xfffff) {
845  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
846  return AVERROR_INVALIDDATA;
847  }
848  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
849  s->last_dc[component] = val;
850  block[0] = av_clip_int16(val);
851  /* AC coefs */
852  i = 0;
853  {OPEN_READER(re, &s->gb);
854  do {
855  UPDATE_CACHE(re, &s->gb);
856  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
857 
858  i += ((unsigned)code) >> 4;
859  code &= 0xf;
860  if (code) {
861  if (code > MIN_CACHE_BITS - 16)
862  UPDATE_CACHE(re, &s->gb);
863 
864  {
865  int cache = GET_CACHE(re, &s->gb);
866  int sign = (~cache) >> 31;
867  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
868  }
869 
870  LAST_SKIP_BITS(re, &s->gb, code);
871 
872  if (i > 63) {
873  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
874  return AVERROR_INVALIDDATA;
875  }
876  j = s->permutated_scantable[i];
877  block[j] = level * quant_matrix[i];
878  }
879  } while (i < 63);
880  CLOSE_READER(re, &s->gb);}
881 
882  return 0;
883 }
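/* AC coefficients use run/size coding (ITU-T T.81 F.1.2.2).  The AC VLC
 * built by ff_mjpeg_build_vlc() is pre-biased so that code >> 4 advances
 * i straight to the next nonzero coefficient and the EOB symbol pushes i
 * past 63, ending the loop without a special case; code & 0xf is the
 * number of extra bits.  The cache/sign arithmetic above is the usual
 * branchless JPEG sign extension. */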
884 
885 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
886  int component, int dc_index,
887  uint16_t *quant_matrix, int Al)
888 {
889  unsigned val;
890  s->bdsp.clear_block(block);
891  val = mjpeg_decode_dc(s, dc_index);
892  if (val == 0xfffff) {
893  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
894  return AVERROR_INVALIDDATA;
895  }
896  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
897  s->last_dc[component] = val;
898  block[0] = val;
899  return 0;
900 }
901 
902 /* decode block and dequantize - progressive JPEG version */
903 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
904  uint8_t *last_nnz, int ac_index,
905  uint16_t *quant_matrix,
906  int ss, int se, int Al, int *EOBRUN)
907 {
908  int code, i, j, val, run;
909  unsigned level;
910 
911  if (*EOBRUN) {
912  (*EOBRUN)--;
913  return 0;
914  }
915 
916  {
917  OPEN_READER(re, &s->gb);
918  for (i = ss; ; i++) {
919  UPDATE_CACHE(re, &s->gb);
920  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
921 
922  run = ((unsigned) code) >> 4;
923  code &= 0xF;
924  if (code) {
925  i += run;
926  if (code > MIN_CACHE_BITS - 16)
927  UPDATE_CACHE(re, &s->gb);
928 
929  {
930  int cache = GET_CACHE(re, &s->gb);
931  int sign = (~cache) >> 31;
932  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
933  }
934 
935  LAST_SKIP_BITS(re, &s->gb, code);
936 
937  if (i >= se) {
938  if (i == se) {
939  j = s->permutated_scantable[se];
940  block[j] = level * (quant_matrix[se] << Al);
941  break;
942  }
943  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
944  return AVERROR_INVALIDDATA;
945  }
946  j = s->permutated_scantable[i];
947  block[j] = level * (quant_matrix[i] << Al);
948  } else {
949  if (run == 0xF) {// ZRL - skip 15 coefficients
950  i += 15;
951  if (i >= se) {
952  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
953  return AVERROR_INVALIDDATA;
954  }
955  } else {
956  val = (1 << run);
957  if (run) {
958  UPDATE_CACHE(re, &s->gb);
959  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
960  LAST_SKIP_BITS(re, &s->gb, run);
961  }
962  *EOBRUN = val - 1;
963  break;
964  }
965  }
966  }
967  CLOSE_READER(re, &s->gb);
968  }
969 
970  if (i > *last_nnz)
971  *last_nnz = i;
972 
973  return 0;
974 }
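/* Progressive AC scans cover only the spectral band ss..se of each block
 * at bit position Al.  EOBRUN implements the end-of-band runs of ITU-T
 * T.81 G.1.2.2: an EOBn symbol with run field r signals that 2^r plus the
 * value of r appended bits worth of blocks (including the current one)
 * have no further nonzero coefficients in this band, hence the shared
 * counter that is decremented at the top of the function. */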
975 
976 #define REFINE_BIT(j) { \
977  UPDATE_CACHE(re, &s->gb); \
978  sign = block[j] >> 15; \
979  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
980  ((quant_matrix[i] ^ sign) - sign) << Al; \
981  LAST_SKIP_BITS(re, &s->gb, 1); \
982 }
983 
984 #define ZERO_RUN \
985 for (; ; i++) { \
986  if (i > last) { \
987  i += run; \
988  if (i > se) { \
989  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
990  return -1; \
991  } \
992  break; \
993  } \
994  j = s->permutated_scantable[i]; \
995  if (block[j]) \
996  REFINE_BIT(j) \
997  else if (run-- == 0) \
998  break; \
999 }
1000 
1001 /* decode block and dequantize - progressive JPEG refinement pass */
1002 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
1003  uint8_t *last_nnz,
1004  int ac_index, uint16_t *quant_matrix,
1005  int ss, int se, int Al, int *EOBRUN)
1006 {
1007  int code, i = ss, j, sign, val, run;
1008  int last = FFMIN(se, *last_nnz);
1009 
1010  OPEN_READER(re, &s->gb);
1011  if (*EOBRUN) {
1012  (*EOBRUN)--;
1013  } else {
1014  for (; ; i++) {
1015  UPDATE_CACHE(re, &s->gb);
1016  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1017 
1018  if (code & 0xF) {
1019  run = ((unsigned) code) >> 4;
1020  UPDATE_CACHE(re, &s->gb);
1021  val = SHOW_UBITS(re, &s->gb, 1);
1022  LAST_SKIP_BITS(re, &s->gb, 1);
1023  ZERO_RUN;
1024  j = s->permutated_scantable[i];
1025  val--;
1026  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1027  if (i == se) {
1028  if (i > *last_nnz)
1029  *last_nnz = i;
1030  CLOSE_READER(re, &s->gb);
1031  return 0;
1032  }
1033  } else {
1034  run = ((unsigned) code) >> 4;
1035  if (run == 0xF) {
1036  ZERO_RUN;
1037  } else {
1038  val = run;
1039  run = (1 << run);
1040  if (val) {
1041  UPDATE_CACHE(re, &s->gb);
1042  run += SHOW_UBITS(re, &s->gb, val);
1043  LAST_SKIP_BITS(re, &s->gb, val);
1044  }
1045  *EOBRUN = run - 1;
1046  break;
1047  }
1048  }
1049  }
1050 
1051  if (i > *last_nnz)
1052  *last_nnz = i;
1053  }
1054 
1055  for (; i <= last; i++) {
1056  j = s->permutated_scantable[i];
1057  if (block[j])
1058  REFINE_BIT(j)
1059  }
1060  CLOSE_READER(re, &s->gb);
1061 
1062  return 0;
1063 }
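/* Successive-approximation refinement (Ah != 0): coefficients that are
 * already nonzero receive one correction bit each (REFINE_BIT), while
 * newly significant ones are inserted as +/-1 scaled to the current bit
 * position, i.e. quant_matrix[i] << Al, following the correction
 * procedure in Annex G of ITU-T T.81. */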
1064 #undef REFINE_BIT
1065 #undef ZERO_RUN
1066 
1067 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1068 {
1069  int i;
1070  int reset = 0;
1071 
1072  if (s->restart_interval) {
1073  s->restart_count--;
1074  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1075  align_get_bits(&s->gb);
1076  for (i = 0; i < nb_components; i++) /* reset dc */
1077  s->last_dc[i] = (4 << s->bits);
1078  }
1079 
1080  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1081  /* skip RSTn */
1082  if (s->restart_count == 0) {
1083  if( show_bits(&s->gb, i) == (1 << i) - 1
1084  || show_bits(&s->gb, i) == 0xFF) {
1085  int pos = get_bits_count(&s->gb);
1086  align_get_bits(&s->gb);
1087  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1088  skip_bits(&s->gb, 8);
1089  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1090  for (i = 0; i < nb_components; i++) /* reset dc */
1091  s->last_dc[i] = (4 << s->bits);
1092  reset = 1;
1093  } else
1094  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1095  }
1096  }
1097  }
1098  return reset;
1099 }
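/* Restart markers RST0..RST7 (0xFFD0..0xFFD7) may appear every
 * restart_interval MCUs; they are byte aligned and reset the DC
 * predictors.  The extra probing above is defensive: some encoders place
 * the markers at unexpected points, so the decoder only resyncs when an
 * aligned 0xD0..0xD7 byte is actually found and otherwise restores the
 * original bit position. */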
1100 
1101 /* Handles 1 to 4 components */
1102 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1103 {
1104  int i, mb_x, mb_y;
1105  unsigned width;
1106  uint16_t (*buffer)[4];
1107  int left[4], top[4], topleft[4];
1108  const int linesize = s->linesize[0];
1109  const int mask = ((1 << s->bits) - 1) << point_transform;
1110  int resync_mb_y = 0;
1111  int resync_mb_x = 0;
1112  int vpred[6];
1113 
1114  if (!s->bayer && s->nb_components < 3)
1115  return AVERROR_INVALIDDATA;
1116  if (s->bayer && s->nb_components > 2)
1117  return AVERROR_INVALIDDATA;
1118  if (s->nb_components <= 0 || s->nb_components > 4)
1119  return AVERROR_INVALIDDATA;
1120  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1121  return AVERROR_INVALIDDATA;
1122  if (s->bayer) {
1123  if (s->rct || s->pegasus_rct)
1124  return AVERROR_INVALIDDATA;
1125  }
1126 
1127 
1128  s->restart_count = s->restart_interval;
1129 
1130  if (s->restart_interval == 0)
1131  s->restart_interval = INT_MAX;
1132 
1133  if (s->bayer)
1134  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1135  else
1136  width = s->mb_width;
1137 
1138  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1139  if (!s->ljpeg_buffer)
1140  return AVERROR(ENOMEM);
1141 
1142  buffer = s->ljpeg_buffer;
1143 
1144  for (i = 0; i < 4; i++)
1145  buffer[0][i] = 1 << (s->bits - 1);
1146 
1147  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1148  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1149 
1150  if (s->interlaced && s->bottom_field)
1151  ptr += linesize >> 1;
1152 
1153  for (i = 0; i < 4; i++)
1154  top[i] = left[i] = topleft[i] = buffer[0][i];
1155 
1156  if ((mb_y * s->width) % s->restart_interval == 0) {
1157  for (i = 0; i < 6; i++)
1158  vpred[i] = 1 << (s->bits-1);
1159  }
1160 
1161  for (mb_x = 0; mb_x < width; mb_x++) {
1162  int modified_predictor = predictor;
1163 
1164  if (get_bits_left(&s->gb) < 1) {
1165  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1166  return AVERROR_INVALIDDATA;
1167  }
1168 
1169  if (s->restart_interval && !s->restart_count){
1170  s->restart_count = s->restart_interval;
1171  resync_mb_x = mb_x;
1172  resync_mb_y = mb_y;
1173  for(i=0; i<4; i++)
1174  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1175  }
1176  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1177  modified_predictor = 1;
1178 
1179  for (i=0;i<nb_components;i++) {
1180  int pred, dc;
1181 
1182  topleft[i] = top[i];
1183  top[i] = buffer[mb_x][i];
1184 
1185  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1186  if(dc == 0xFFFFF)
1187  return -1;
1188 
1189  if (!s->bayer || mb_x) {
1190  pred = left[i];
1191  } else { /* This path runs only for the first line in bayer images */
1192  vpred[i] += dc;
1193  pred = vpred[i] - dc;
1194  }
1195 
1196  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1197 
1198  left[i] = buffer[mb_x][i] =
1199  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1200  }
1201 
1202  if (s->restart_interval && !--s->restart_count) {
1203  align_get_bits(&s->gb);
1204  skip_bits(&s->gb, 16); /* skip RSTn */
1205  }
1206  }
1207  if (s->rct && s->nb_components == 4) {
1208  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1209  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1210  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1211  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1212  ptr[4*mb_x + 0] = buffer[mb_x][3];
1213  }
1214  } else if (s->nb_components == 4) {
1215  for(i=0; i<nb_components; i++) {
1216  int c= s->comp_index[i];
1217  if (s->bits <= 8) {
1218  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1219  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1220  }
1221  } else if(s->bits == 9) {
1222  return AVERROR_PATCHWELCOME;
1223  } else {
1224  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1225  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1226  }
1227  }
1228  }
1229  } else if (s->rct) {
1230  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1231  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1232  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1233  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1234  }
1235  } else if (s->pegasus_rct) {
1236  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1237  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1238  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1239  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1240  }
1241  } else if (s->bayer) {
1242  if (s->bits <= 8)
1243  return AVERROR_PATCHWELCOME;
1244  if (nb_components == 1) {
1245  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1246  for (mb_x = 0; mb_x < width; mb_x++)
1247  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1248  } else if (nb_components == 2) {
1249  for (mb_x = 0; mb_x < width; mb_x++) {
1250  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1251  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1252  }
1253  }
1254  } else {
1255  for(i=0; i<nb_components; i++) {
1256  int c= s->comp_index[i];
1257  if (s->bits <= 8) {
1258  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1259  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1260  }
1261  } else if(s->bits == 9) {
1262  return AVERROR_PATCHWELCOME;
1263  } else {
1264  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1265  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1266  }
1267  }
1268  }
1269  }
1270  }
1271  return 0;
1272 }
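/* Lossless JPEG prediction (ITU-T T.81 Annex H): PREDICT() selects one of
 * the seven standard predictors from the left, above and above-left
 * neighbours (1 = left, 2 = above, 4 = left + above - above-left,
 * 7 = (left + above) / 2, ...).  The top row after a resync falls back to
 * predictor 1 with the neighbours seeded to the mid-level 1 << (bits - 1),
 * and the optional (pegasus_)RCT step above undoes a reversible colour
 * transform to recover RGB. */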
1273 
1274 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1275  int point_transform, int nb_components)
1276 {
1277  int i, mb_x, mb_y, mask;
1278  int bits= (s->bits+7)&~7;
1279  int resync_mb_y = 0;
1280  int resync_mb_x = 0;
1281 
1282  point_transform += bits - s->bits;
1283  mask = ((1 << s->bits) - 1) << point_transform;
1284 
1285  av_assert0(nb_components>=1 && nb_components<=4);
1286 
1287  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1288  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1289  if (get_bits_left(&s->gb) < 1) {
1290  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1291  return AVERROR_INVALIDDATA;
1292  }
1293  if (s->restart_interval && !s->restart_count){
1294  s->restart_count = s->restart_interval;
1295  resync_mb_x = mb_x;
1296  resync_mb_y = mb_y;
1297  }
1298 
1299  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1300  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1301  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1302  for (i = 0; i < nb_components; i++) {
1303  uint8_t *ptr;
1304  uint16_t *ptr16;
1305  int n, h, v, x, y, c, j, linesize;
1306  n = s->nb_blocks[i];
1307  c = s->comp_index[i];
1308  h = s->h_scount[i];
1309  v = s->v_scount[i];
1310  x = 0;
1311  y = 0;
1312  linesize= s->linesize[c];
1313 
1314  if(bits>8) linesize /= 2;
1315 
1316  for(j=0; j<n; j++) {
1317  int pred, dc;
1318 
1319  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1320  if(dc == 0xFFFFF)
1321  return -1;
1322  if ( h * mb_x + x >= s->width
1323  || v * mb_y + y >= s->height) {
1324  // Nothing to do
1325  } else if (bits<=8) {
1326  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1327  if(y==0 && toprow){
1328  if(x==0 && leftcol){
1329  pred= 1 << (bits - 1);
1330  }else{
1331  pred= ptr[-1];
1332  }
1333  }else{
1334  if(x==0 && leftcol){
1335  pred= ptr[-linesize];
1336  }else{
1337  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1338  }
1339  }
1340 
1341  if (s->interlaced && s->bottom_field)
1342  ptr += linesize >> 1;
1343  pred &= mask;
1344  *ptr= pred + ((unsigned)dc << point_transform);
1345  }else{
1346  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1347  if(y==0 && toprow){
1348  if(x==0 && leftcol){
1349  pred= 1 << (bits - 1);
1350  }else{
1351  pred= ptr16[-1];
1352  }
1353  }else{
1354  if(x==0 && leftcol){
1355  pred= ptr16[-linesize];
1356  }else{
1357  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1358  }
1359  }
1360 
1361  if (s->interlaced && s->bottom_field)
1362  ptr16 += linesize >> 1;
1363  pred &= mask;
1364  *ptr16= pred + ((unsigned)dc << point_transform);
1365  }
1366  if (++x == h) {
1367  x = 0;
1368  y++;
1369  }
1370  }
1371  }
1372  } else {
1373  for (i = 0; i < nb_components; i++) {
1374  uint8_t *ptr;
1375  uint16_t *ptr16;
1376  int n, h, v, x, y, c, j, linesize, dc;
1377  n = s->nb_blocks[i];
1378  c = s->comp_index[i];
1379  h = s->h_scount[i];
1380  v = s->v_scount[i];
1381  x = 0;
1382  y = 0;
1383  linesize = s->linesize[c];
1384 
1385  if(bits>8) linesize /= 2;
1386 
1387  for (j = 0; j < n; j++) {
1388  int pred;
1389 
1390  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1391  if(dc == 0xFFFFF)
1392  return -1;
1393  if ( h * mb_x + x >= s->width
1394  || v * mb_y + y >= s->height) {
1395  // Nothing to do
1396  } else if (bits<=8) {
1397  ptr = s->picture_ptr->data[c] +
1398  (linesize * (v * mb_y + y)) +
1399  (h * mb_x + x); //FIXME optimize this crap
1400  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1401 
1402  pred &= mask;
1403  *ptr = pred + ((unsigned)dc << point_transform);
1404  }else{
1405  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1406  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1407 
1408  pred &= mask;
1409  *ptr16= pred + ((unsigned)dc << point_transform);
1410  }
1411 
1412  if (++x == h) {
1413  x = 0;
1414  y++;
1415  }
1416  }
1417  }
1418  }
1419  if (s->restart_interval && !--s->restart_count) {
1420  align_get_bits(&s->gb);
1421  skip_bits(&s->gb, 16); /* skip RSTn */
1422  }
1423  }
1424  }
1425  return 0;
1426 }
1427 
1428 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1429  uint8_t *dst, const uint8_t *src,
1430  int linesize, int lowres)
1431 {
1432  switch (lowres) {
1433  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1434  break;
1435  case 1: copy_block4(dst, src, linesize, linesize, 4);
1436  break;
1437  case 2: copy_block2(dst, src, linesize, linesize, 2);
1438  break;
1439  case 3: *dst = *src;
1440  break;
1441  }
1442 }
1443 
1444 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1445 {
1446  int block_x, block_y;
1447  int size = 8 >> s->avctx->lowres;
1448  if (s->bits > 8) {
1449  for (block_y=0; block_y<size; block_y++)
1450  for (block_x=0; block_x<size; block_x++)
1451  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1452  } else {
1453  for (block_y=0; block_y<size; block_y++)
1454  for (block_x=0; block_x<size; block_x++)
1455  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1456  }
1457 }
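/* For bit depths that are not a multiple of 8 (e.g. 12-bit DCT streams
 * stored in 16-bit planes) the IDCT output is shifted up so the samples
 * occupy the full range of the output pixel format. */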
1458 
1459 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1460  int Al, const uint8_t *mb_bitmask,
1461  int mb_bitmask_size,
1462  const AVFrame *reference)
1463 {
1464  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1465  uint8_t *data[MAX_COMPONENTS];
1466  const uint8_t *reference_data[MAX_COMPONENTS];
1467  int linesize[MAX_COMPONENTS];
1468  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1469  int bytes_per_pixel = 1 + (s->bits > 8);
1470 
1471  if (mb_bitmask) {
1472  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1473  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1474  return AVERROR_INVALIDDATA;
1475  }
1476  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1477  }
1478 
1479  s->restart_count = 0;
1480 
1481  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1482  &chroma_v_shift);
1483  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1484  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1485 
1486  for (i = 0; i < nb_components; i++) {
1487  int c = s->comp_index[i];
1488  data[c] = s->picture_ptr->data[c];
1489  reference_data[c] = reference ? reference->data[c] : NULL;
1490  linesize[c] = s->linesize[c];
1491  s->coefs_finished[c] |= 1;
1492  }
1493 
1494  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1495  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1496  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1497 
1498  if (s->restart_interval && !s->restart_count)
1499  s->restart_count = s->restart_interval;
1500 
1501  if (get_bits_left(&s->gb) < 0) {
1502  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1503  -get_bits_left(&s->gb));
1504  return AVERROR_INVALIDDATA;
1505  }
1506  for (i = 0; i < nb_components; i++) {
1507  uint8_t *ptr;
1508  int n, h, v, x, y, c, j;
1509  int block_offset;
1510  n = s->nb_blocks[i];
1511  c = s->comp_index[i];
1512  h = s->h_scount[i];
1513  v = s->v_scount[i];
1514  x = 0;
1515  y = 0;
1516  for (j = 0; j < n; j++) {
1517  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1518  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1519 
1520  if (s->interlaced && s->bottom_field)
1521  block_offset += linesize[c] >> 1;
1522  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1523  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1524  ptr = data[c] + block_offset;
1525  } else
1526  ptr = NULL;
1527  if (!s->progressive) {
1528  if (copy_mb) {
1529  if (ptr)
1530  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1531  linesize[c], s->avctx->lowres);
1532 
1533  } else {
1534  s->bdsp.clear_block(s->block);
1535  if (decode_block(s, s->block, i,
1536  s->dc_index[i], s->ac_index[i],
1537  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1538  av_log(s->avctx, AV_LOG_ERROR,
1539  "error y=%d x=%d\n", mb_y, mb_x);
1540  return AVERROR_INVALIDDATA;
1541  }
1542  if (ptr && linesize[c]) {
1543  s->idsp.idct_put(ptr, linesize[c], s->block);
1544  if (s->bits & 7)
1545  shift_output(s, ptr, linesize[c]);
1546  }
1547  }
1548  } else {
1549  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1550  (h * mb_x + x);
1551  int16_t *block = s->blocks[c][block_idx];
1552  if (Ah)
1553  block[0] += get_bits1(&s->gb) *
1554  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1555  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1556  s->quant_matrixes[s->quant_sindex[i]],
1557  Al) < 0) {
1558  av_log(s->avctx, AV_LOG_ERROR,
1559  "error y=%d x=%d\n", mb_y, mb_x);
1560  return AVERROR_INVALIDDATA;
1561  }
1562  }
1563  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1564  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1565  mb_x, mb_y, x, y, c, s->bottom_field,
1566  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1567  if (++x == h) {
1568  x = 0;
1569  y++;
1570  }
1571  }
1572  }
1573 
1574  handle_rstn(s, nb_components);
1575  }
1576  }
1577  return 0;
1578 }
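/* Sequential (baseline) scan above: each MCU holds h*v blocks per
 * component, decoded in component order.  The optional mb_bitmask, used
 * by the MxPEG decoder, marks MCUs that are not coded in this frame and
 * are copied from the reference picture instead. */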
1579 
1580 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1581  int se, int Ah, int Al)
1582 {
1583  int mb_x, mb_y;
1584  int EOBRUN = 0;
1585  int c = s->comp_index[0];
1586  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1587 
1588  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1589  if (se < ss || se > 63) {
1590  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1591  return AVERROR_INVALIDDATA;
1592  }
1593 
1594  // s->coefs_finished is a bitmask for coefficients coded
1595  // ss and se are parameters telling start and end coefficients
1596  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1597 
1598  s->restart_count = 0;
1599 
1600  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1601  int block_idx = mb_y * s->block_stride[c];
1602  int16_t (*block)[64] = &s->blocks[c][block_idx];
1603  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1604  if (get_bits_left(&s->gb) <= 0) {
1605  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1606  return AVERROR_INVALIDDATA;
1607  }
1608  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1609  int ret;
1610  if (s->restart_interval && !s->restart_count)
1611  s->restart_count = s->restart_interval;
1612 
1613  if (Ah)
1614  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1615  quant_matrix, ss, se, Al, &EOBRUN);
1616  else
1617  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1618  quant_matrix, ss, se, Al, &EOBRUN);
1619 
1620  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1621  ret = AVERROR_INVALIDDATA;
1622  if (ret < 0) {
1623  av_log(s->avctx, AV_LOG_ERROR,
1624  "error y=%d x=%d\n", mb_y, mb_x);
1625  return AVERROR_INVALIDDATA;
1626  }
1627 
1628  if (handle_rstn(s, 0))
1629  EOBRUN = 0;
1630  }
1631  }
1632  return 0;
1633 }
1634 
1635 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1636 {
1637  int mb_x, mb_y;
1638  int c;
1639  const int bytes_per_pixel = 1 + (s->bits > 8);
1640  const int block_size = s->lossless ? 1 : 8;
1641 
1642  for (c = 0; c < s->nb_components; c++) {
1643  uint8_t *data = s->picture_ptr->data[c];
1644  int linesize = s->linesize[c];
1645  int h = s->h_max / s->h_count[c];
1646  int v = s->v_max / s->v_count[c];
1647  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1648  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1649 
1650  if (~s->coefs_finished[c])
1651  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1652 
1653  if (s->interlaced && s->bottom_field)
1654  data += linesize >> 1;
1655 
1656  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1657  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1658  int block_idx = mb_y * s->block_stride[c];
1659  int16_t (*block)[64] = &s->blocks[c][block_idx];
1660  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1661  s->idsp.idct_put(ptr, linesize, *block);
1662  if (s->bits & 7)
1663  shift_output(s, ptr, linesize);
1664  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1665  }
1666  }
1667  }
1668 }
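/* For progressive streams the coefficients accumulated across the scans
 * are only inverse-transformed here, in a single pass per component,
 * once the frame's scans have been parsed. */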
1669 
1670 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1671  int mb_bitmask_size, const AVFrame *reference)
1672 {
1673  int len, nb_components, i, h, v, predictor, point_transform;
1674  int index, id, ret;
1675  const int block_size = s->lossless ? 1 : 8;
1676  int ilv, prev_shift;
1677 
1678  if (!s->got_picture) {
1679  av_log(s->avctx, AV_LOG_WARNING,
1680  "Can not process SOS before SOF, skipping\n");
1681  return -1;
1682  }
1683 
1684  if (reference) {
1685  if (reference->width != s->picture_ptr->width ||
1686  reference->height != s->picture_ptr->height ||
1687  reference->format != s->picture_ptr->format) {
1688  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1689  return AVERROR_INVALIDDATA;
1690  }
1691  }
1692 
1693  /* XXX: verify len field validity */
1694  len = get_bits(&s->gb, 16);
1695  nb_components = get_bits(&s->gb, 8);
1696  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1697  avpriv_report_missing_feature(s->avctx,
1698  "decode_sos: nb_components (%d)",
1699  nb_components);
1700  return AVERROR_PATCHWELCOME;
1701  }
1702  if (len != 6 + 2 * nb_components) {
1703  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1704  return AVERROR_INVALIDDATA;
1705  }
1706  for (i = 0; i < nb_components; i++) {
1707  id = get_bits(&s->gb, 8);
1708  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1709  /* find component index */
1710  for (index = 0; index < s->nb_components; index++)
1711  if (id == s->component_id[index])
1712  break;
1713  if (index == s->nb_components) {
1714  av_log(s->avctx, AV_LOG_ERROR,
1715  "decode_sos: index(%d) out of components\n", index);
1716  return AVERROR_INVALIDDATA;
1717  }
1718  /* Metasoft MJPEG codec has Cb and Cr swapped */
1719  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1720  && nb_components == 3 && s->nb_components == 3 && i)
1721  index = 3 - i;
1722 
1723  s->quant_sindex[i] = s->quant_index[index];
1724  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1725  s->h_scount[i] = s->h_count[index];
1726  s->v_scount[i] = s->v_count[index];
1727 
1728  s->comp_index[i] = index;
1729 
1730  s->dc_index[i] = get_bits(&s->gb, 4);
1731  s->ac_index[i] = get_bits(&s->gb, 4);
1732 
1733  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1734  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1735  goto out_of_range;
1736  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1737  goto out_of_range;
1738  }
1739 
1740  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1741  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1742  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1743  prev_shift = get_bits(&s->gb, 4); /* Ah */
1744  point_transform = get_bits(&s->gb, 4); /* Al */
1745  }else
1746  prev_shift = point_transform = 0;
1747 
1748  if (nb_components > 1) {
1749  /* interleaved stream */
1750  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1751  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1752  } else if (!s->ls) { /* skip this for JPEG-LS */
1753  h = s->h_max / s->h_scount[0];
1754  v = s->v_max / s->v_scount[0];
1755  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1756  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1757  s->nb_blocks[0] = 1;
1758  s->h_scount[0] = 1;
1759  s->v_scount[0] = 1;
1760  }
1761 
1762  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1763  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1764  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1765  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1766  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1767 
1768 
1769  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1770  for (i = s->mjpb_skiptosod; i > 0; i--)
1771  skip_bits(&s->gb, 8);
1772 
1773 next_field:
1774  for (i = 0; i < nb_components; i++)
1775  s->last_dc[i] = (4 << s->bits);
1776 
1777  if (s->avctx->hwaccel) {
1778  int bytes_to_start = get_bits_count(&s->gb) / 8;
1779  av_assert0(bytes_to_start >= 0 &&
1780  s->raw_scan_buffer_size >= bytes_to_start);
1781 
1782  ret = FF_HW_CALL(s->avctx, decode_slice,
1783  s->raw_scan_buffer + bytes_to_start,
1784  s->raw_scan_buffer_size - bytes_to_start);
1785  if (ret < 0)
1786  return ret;
1787 
1788  } else if (s->lossless) {
1789  av_assert0(s->picture_ptr == s->picture);
1790  if (CONFIG_JPEGLS_DECODER && s->ls) {
1791 // for () {
1792 // reset_ls_coding_parameters(s, 0);
1793 
1794  if ((ret = ff_jpegls_decode_picture(s, predictor,
1795  point_transform, ilv)) < 0)
1796  return ret;
1797  } else {
1798  if (s->rgb || s->bayer) {
1799  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1800  return ret;
1801  } else {
1802  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1803  point_transform,
1804  nb_components)) < 0)
1805  return ret;
1806  }
1807  }
1808  } else {
1809  if (s->progressive && predictor) {
1810  av_assert0(s->picture_ptr == s->picture);
1811  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1812  ilv, prev_shift,
1813  point_transform)) < 0)
1814  return ret;
1815  } else {
1816  if ((ret = mjpeg_decode_scan(s, nb_components,
1817  prev_shift, point_transform,
1818  mb_bitmask, mb_bitmask_size, reference)) < 0)
1819  return ret;
1820  }
1821  }
1822 
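 /* AVRn interlaced streams separate the two fields with an RST1 (0xFFD1) marker;
    if one follows, flip bottom_field and decode the second field. */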
1823  if (s->interlaced &&
1824  get_bits_left(&s->gb) > 32 &&
1825  show_bits(&s->gb, 8) == 0xFF) {
1826  GetBitContext bak = s->gb;
1827  align_get_bits(&bak);
1828  if (show_bits(&bak, 16) == 0xFFD1) {
1829  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1830  s->gb = bak;
1831  skip_bits(&s->gb, 16);
1832  s->bottom_field ^= 1;
1833 
1834  goto next_field;
1835  }
1836  }
1837 
1838  emms_c();
1839  return 0;
1840  out_of_range:
1841  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1842  return AVERROR_INVALIDDATA;
1843 }
1844 
1845 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1846 {
1847  if (get_bits(&s->gb, 16) != 4)
1848  return AVERROR_INVALIDDATA;
1849  s->restart_interval = get_bits(&s->gb, 16);
1850  s->restart_count = 0;
1851  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1852  s->restart_interval);
1853 
1854  return 0;
1855 }
1856 
1857 static int mjpeg_decode_app(MJpegDecodeContext *s)
1858 {
1859  int len, id, i;
1860 
1861  len = get_bits(&s->gb, 16);
1862  if (len < 2)
1863  return AVERROR_INVALIDDATA;
1864  len -= 2;
1865 
1866  if (len < 4) {
1867  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1868  return AVERROR_INVALIDDATA;
1869  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1870  goto out;
1871  }
1872 
1873  if (8 * len > get_bits_left(&s->gb))
1874  return AVERROR_INVALIDDATA;
1875 
1876  id = get_bits_long(&s->gb, 32);
1877  len -= 4;
1878 
1879  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1880  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1881  av_fourcc2str(av_bswap32(id)), id, len);
1882 
1883  /* Buggy AVID encoders put EOI only at every 10th frame. */
1884  /* This fourcc is also used by non-AVID files; it holds some
1885  information, but it is always present in AVID-created files. */
1886  if (id == AV_RB32("AVI1")) {
1887  /* structure:
1888  4 bytes  AVI1
1889  1 byte   polarity
1890  1 byte   always zero
1891  4 bytes  field_size
1892  4 bytes  field_size_less_padding
1893  */
1894  s->buggy_avid = 1;
1895  i = get_bits(&s->gb, 8); len--;
1896  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1897  goto out;
1898  }
1899 
1900  if (id == AV_RB32("JFIF")) {
1901  int t_w, t_h, v1, v2;
1902  if (len < 8)
1903  goto out;
1904  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1905  v1 = get_bits(&s->gb, 8);
1906  v2 = get_bits(&s->gb, 8);
1907  skip_bits(&s->gb, 8);
1908 
1909  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1910  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1911  if ( s->avctx->sample_aspect_ratio.num <= 0
1912  || s->avctx->sample_aspect_ratio.den <= 0) {
1913  s->avctx->sample_aspect_ratio.num = 0;
1914  s->avctx->sample_aspect_ratio.den = 1;
1915  }
1916 
1917  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1918  av_log(s->avctx, AV_LOG_INFO,
1919  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1920  v1, v2,
1921  s->avctx->sample_aspect_ratio.num,
1922  s->avctx->sample_aspect_ratio.den);
1923 
1924  len -= 8;
1925  if (len >= 2) {
1926  t_w = get_bits(&s->gb, 8);
1927  t_h = get_bits(&s->gb, 8);
1928  if (t_w && t_h) {
1929  /* skip thumbnail */
1930  if (len - 10 - (t_w * t_h * 3) > 0)
1931  len -= t_w * t_h * 3;
1932  }
1933  len -= 2;
1934  }
1935  goto out;
1936  }
1937 
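 /* Adobe APP14 marker: the transform byte describes how the components are coded
    (0 = unknown/RGB or CMYK, 1 = YCbCr, 2 = YCCK). */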
1938  if ( id == AV_RB32("Adob")
1939  && len >= 8
1940  && show_bits(&s->gb, 8) == 'e'
1941  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1942  skip_bits(&s->gb, 8); /* 'e' */
1943  skip_bits(&s->gb, 16); /* version */
1944  skip_bits(&s->gb, 16); /* flags0 */
1945  skip_bits(&s->gb, 16); /* flags1 */
1946  s->adobe_transform = get_bits(&s->gb, 8);
1947  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1948  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1949  len -= 8;
1950  goto out;
1951  }
1952 
1953  if (id == AV_RB32("LJIF")) {
1954  int rgb = s->rgb;
1955  int pegasus_rct = s->pegasus_rct;
1956  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1957  av_log(s->avctx, AV_LOG_INFO,
1958  "Pegasus lossless jpeg header found\n");
1959  skip_bits(&s->gb, 16); /* version ? */
1960  skip_bits(&s->gb, 16); /* unknown always 0? */
1961  skip_bits(&s->gb, 16); /* unknown always 0? */
1962  skip_bits(&s->gb, 16); /* unknown always 0? */
1963  switch (i=get_bits(&s->gb, 8)) {
1964  case 1:
1965  rgb = 1;
1966  pegasus_rct = 0;
1967  break;
1968  case 2:
1969  rgb = 1;
1970  pegasus_rct = 1;
1971  break;
1972  default:
1973  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1974  }
1975 
1976  len -= 9;
1977  if (s->bayer)
1978  goto out;
1979  if (s->got_picture)
1980  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1981  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1982  goto out;
1983  }
1984 
1985  s->rgb = rgb;
1986  s->pegasus_rct = pegasus_rct;
1987 
1988  goto out;
1989  }
1990  if (id == AV_RL32("colr") && len > 0) {
1991  s->colr = get_bits(&s->gb, 8);
1992  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1993  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1994  len --;
1995  goto out;
1996  }
1997  if (id == AV_RL32("xfrm") && len > 0) {
1998  s->xfrm = get_bits(&s->gb, 8);
1999  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2000  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
2001  len --;
2002  goto out;
2003  }
2004 
2005  /* JPS extension by VRex */
2006  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
2007  int flags, layout, type;
2008  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2009  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2010 
2011  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
2012  skip_bits(&s->gb, 16); len -= 2; /* block length */
2013  skip_bits(&s->gb, 8); /* reserved */
2014  flags = get_bits(&s->gb, 8);
2015  layout = get_bits(&s->gb, 8);
2016  type = get_bits(&s->gb, 8);
2017  len -= 4;
2018 
2019  av_freep(&s->stereo3d);
2020  s->stereo3d = av_stereo3d_alloc();
2021  if (!s->stereo3d) {
2022  goto out;
2023  }
2024  if (type == 0) {
2025  s->stereo3d->type = AV_STEREO3D_2D;
2026  } else if (type == 1) {
2027  switch (layout) {
2028  case 0x01:
2029  s->stereo3d->type = AV_STEREO3D_LINES;
2030  break;
2031  case 0x02:
2032  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2033  break;
2034  case 0x03:
2035  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2036  break;
2037  }
2038  if (!(flags & 0x04)) {
2039  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2040  }
2041  }
2042  goto out;
2043  }
2044 
2045  /* EXIF metadata */
2046  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2047  int ret;
2048  const uint8_t *aligned;
2049 
2050  skip_bits(&s->gb, 16); // skip padding
2051  len -= 2;
2052 
2053  // init byte wise reading
2054  aligned = align_get_bits(&s->gb);
2055 
2056  ret = av_exif_parse_buffer(s->avctx, aligned, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2057  if (ret < 0) {
2058  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2059  goto out;
2060  }
2061 
2062  skip_bits(&s->gb, ret << 3);
2063  len -= ret;
2064 
2065  goto out;
2066  }
2067 
2068  /* Apple MJPEG-A */
2069  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2070  id = get_bits_long(&s->gb, 32);
2071  len -= 4;
2072  /* Apple MJPEG-A */
2073  if (id == AV_RB32("mjpg")) {
2074  /* structure:
2075  4 bytes  field size
2076  4 bytes  pad field size
2077  4 bytes  next off
2078  4 bytes  quant off
2079  4 bytes  huff off
2080  4 bytes  image off
2081  4 bytes  scan off
2082  4 bytes  data off
2083  */
2084  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2085  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2086  }
2087  }
2088 
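 /* ICC profiles may be split across several APP2 markers; each chunk carries its
    sequence number and the total marker count, and the chunks are reassembled
    into frame side data once all of them have been read. */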
2089  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2090  int id2;
2091  unsigned seqno;
2092  unsigned nummarkers;
2093 
2094  id = get_bits_long(&s->gb, 32);
2095  id2 = get_bits(&s->gb, 24);
2096  len -= 7;
2097  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2098  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2099  goto out;
2100  }
2101 
2102  skip_bits(&s->gb, 8);
2103  seqno = get_bits(&s->gb, 8);
2104  len -= 2;
2105  if (seqno == 0) {
2106  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2107  goto out;
2108  }
2109 
2110  nummarkers = get_bits(&s->gb, 8);
2111  len -= 1;
2112  if (nummarkers == 0) {
2113  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2114  goto out;
2115  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2116  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2117  goto out;
2118  } else if (seqno > nummarkers) {
2119  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2120  goto out;
2121  }
2122 
2123  /* Allocate if this is the first APP2 we've seen. */
2124  if (s->iccnum == 0) {
2125  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2126  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2127  return AVERROR(ENOMEM);
2128  }
2129  s->iccnum = nummarkers;
2130  }
2131 
2132  if (s->iccentries[seqno - 1].data) {
2133  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2134  goto out;
2135  }
2136 
2137  s->iccentries[seqno - 1].length = len;
2138  s->iccentries[seqno - 1].data = av_malloc(len);
2139  if (!s->iccentries[seqno - 1].data) {
2140  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2141  return AVERROR(ENOMEM);
2142  }
2143 
2144  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2145  skip_bits(&s->gb, len << 3);
2146  len = 0;
2147  s->iccread++;
2148 
2149  if (s->iccread > s->iccnum)
2150  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2151  }
2152 
2153 out:
2154  /* slow but needed for extreme adobe jpegs */
2155  if (len < 0)
2156  av_log(s->avctx, AV_LOG_ERROR,
2157  "mjpeg: error, decode_app parser read over the end\n");
2158  while (len-- > 0)
2159  skip_bits(&s->gb, 8);
2160 
2161  return 0;
2162 }
2163 
2164 static int mjpeg_decode_com(MJpegDecodeContext *s)
2165 {
2166  int len = get_bits(&s->gb, 16);
2167  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2168  int i;
2169  char *cbuf = av_malloc(len - 1);
2170  if (!cbuf)
2171  return AVERROR(ENOMEM);
2172 
2173  for (i = 0; i < len - 2; i++)
2174  cbuf[i] = get_bits(&s->gb, 8);
2175  if (i > 0 && cbuf[i - 1] == '\n')
2176  cbuf[i - 1] = 0;
2177  else
2178  cbuf[i] = 0;
2179 
2180  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2181  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2182 
2183  /* buggy AVID puts EOI only at every 10th frame */
2184  if (!strncmp(cbuf, "AVID", 4)) {
2185  parse_avid(s, cbuf, len);
2186  } else if (!strcmp(cbuf, "CS=ITU601"))
2187  s->cs_itu601 = 1;
2188  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2189  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2190  s->flipped = 1;
2191  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2192  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2193  s->multiscope = 2;
2194  }
2195 
2196  av_free(cbuf);
2197  }
2198 
2199  return 0;
2200 }
2201 
2202 /* return the 8 bit start code value and update the search
2203  state. Return -1 if no start code found */
2204 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2205 {
2206  const uint8_t *buf_ptr;
2207  unsigned int v, v2;
2208  int val;
2209  int skipped = 0;
2210 
2211  buf_ptr = *pbuf_ptr;
2212  while (buf_end - buf_ptr > 1) {
2213  v = *buf_ptr++;
2214  v2 = *buf_ptr;
2215  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2216  val = *buf_ptr++;
2217  goto found;
2218  }
2219  skipped++;
2220  }
2221  buf_ptr = buf_end;
2222  val = -1;
2223 found:
2224  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2225  *pbuf_ptr = buf_ptr;
2226  return val;
2227 }
2228 
2229 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2230  const uint8_t **buf_ptr, const uint8_t *buf_end,
2231  const uint8_t **unescaped_buf_ptr,
2232  int *unescaped_buf_size)
2233 {
2234  int start_code;
2235  start_code = find_marker(buf_ptr, buf_end);
2236 
2237  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2238  if (!s->buffer)
2239  return AVERROR(ENOMEM);
2240 
2241  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2242  if (start_code == SOS && !s->ls) {
2243  const uint8_t *src = *buf_ptr;
2244  const uint8_t *ptr = src;
2245  uint8_t *dst = s->buffer;
2246 
2247  #define copy_data_segment(skip) do { \
2248  ptrdiff_t length = (ptr - src) - (skip); \
2249  if (length > 0) { \
2250  memcpy(dst, src, length); \
2251  dst += length; \
2252  src = ptr; \
2253  } \
2254  } while (0)
2255 
2256  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2257  ptr = buf_end;
2258  copy_data_segment(0);
2259  } else {
2260  while (ptr < buf_end) {
2261  uint8_t x = *(ptr++);
2262 
2263  if (x == 0xff) {
2264  ptrdiff_t skip = 0;
2265  while (ptr < buf_end && x == 0xff) {
2266  x = *(ptr++);
2267  skip++;
2268  }
2269 
2270  /* 0xFF, 0xFF, ... */
2271  if (skip > 1) {
2272  copy_data_segment(skip);
2273 
2274  /* decrement src as it is equal to ptr after the
2275  * copy_data_segment macro and we might want to
2276  * copy the current value of x later on */
2277  src--;
2278  }
2279 
2280  if (x < RST0 || x > RST7) {
2281  copy_data_segment(1);
2282  if (x)
2283  break;
2284  }
2285  }
2286  }
2287  if (src < ptr)
2288  copy_data_segment(0);
2289  }
2290  #undef copy_data_segment
2291 
2292  *unescaped_buf_ptr = s->buffer;
2293  *unescaped_buf_size = dst - s->buffer;
2294  memset(s->buffer + *unescaped_buf_size, 0,
2295  AV_INPUT_BUFFER_PADDING_SIZE);
2296 
2297  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2298  (buf_end - *buf_ptr) - (dst - s->buffer));
2299  } else if (start_code == SOS && s->ls) {
2300  const uint8_t *src = *buf_ptr;
2301  uint8_t *dst = s->buffer;
2302  int bit_count = 0;
2303  int t = 0, b = 0;
2304  PutBitContext pb;
2305 
2306  /* find marker */
2307  while (src + t < buf_end) {
2308  uint8_t x = src[t++];
2309  if (x == 0xff) {
2310  while ((src + t < buf_end) && x == 0xff)
2311  x = src[t++];
2312  if (x & 0x80) {
2313  t -= FFMIN(2, t);
2314  break;
2315  }
2316  }
2317  }
2318  bit_count = t * 8;
2319  init_put_bits(&pb, dst, t);
2320 
2321  /* unescape bitstream */
2322  while (b < t) {
2323  uint8_t x = src[b++];
2324  put_bits(&pb, 8, x);
2325  if (x == 0xFF && b < t) {
2326  x = src[b++];
2327  if (x & 0x80) {
2328  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2329  x &= 0x7f;
2330  }
2331  put_bits(&pb, 7, x);
2332  bit_count--;
2333  }
2334  }
2335  flush_put_bits(&pb);
2336 
2337  *unescaped_buf_ptr = dst;
2338  *unescaped_buf_size = (bit_count + 7) >> 3;
2339  memset(s->buffer + *unescaped_buf_size, 0,
2340  AV_INPUT_BUFFER_PADDING_SIZE);
2341  } else {
2342  *unescaped_buf_ptr = *buf_ptr;
2343  *unescaped_buf_size = buf_end - *buf_ptr;
2344  }
2345 
2346  return start_code;
2347 }
2348 
2349 static void reset_icc_profile(MJpegDecodeContext *s)
2350 {
2351  int i;
2352 
2353  if (s->iccentries) {
2354  for (i = 0; i < s->iccnum; i++)
2355  av_freep(&s->iccentries[i].data);
2356  av_freep(&s->iccentries);
2357  }
2358 
2359  s->iccread = 0;
2360  s->iccnum = 0;
2361 }
2362 
2363 int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2364  int *got_frame, const AVPacket *avpkt,
2365  const uint8_t *buf, const int buf_size)
2366 {
2367  MJpegDecodeContext *s = avctx->priv_data;
2368  const uint8_t *buf_end, *buf_ptr;
2369  const uint8_t *unescaped_buf_ptr;
2370  int hshift, vshift;
2371  int unescaped_buf_size;
2372  int start_code;
2373  int index;
2374  int ret = 0;
2375  int is16bit;
2376 
2377  s->force_pal8 = 0;
2378 
2379  s->buf_size = buf_size;
2380 
2381  av_exif_free(&s->exif_metadata);
2382  av_freep(&s->stereo3d);
2383  s->adobe_transform = -1;
2384 
2385  if (s->iccnum != 0)
2386  reset_icc_profile(s);
2387 
2388 redo_for_pal8:
2389  buf_ptr = buf;
2390  buf_end = buf + buf_size;
2391  while (buf_ptr < buf_end) {
2392  /* find the next start marker */
2393  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2394  &unescaped_buf_ptr,
2395  &unescaped_buf_size);
2396  /* EOF */
2397  if (start_code < 0) {
2398  break;
2399  } else if (unescaped_buf_size > INT_MAX / 8) {
2400  av_log(avctx, AV_LOG_ERROR,
2401  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2402  start_code, unescaped_buf_size, buf_size);
2403  return AVERROR_INVALIDDATA;
2404  }
2405  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2406  start_code, buf_end - buf_ptr);
2407 
2408  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2409 
2410  if (ret < 0) {
2411  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2412  goto fail;
2413  }
2414 
2415  s->start_code = start_code;
2416  if (avctx->debug & FF_DEBUG_STARTCODE)
2417  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2418 
2419  /* process markers */
2420  if (start_code >= RST0 && start_code <= RST7) {
2421  av_log(avctx, AV_LOG_DEBUG,
2422  "restart marker: %d\n", start_code & 0x0f);
2423  /* APP fields */
2424  } else if (start_code >= APP0 && start_code <= APP15) {
2425  if ((ret = mjpeg_decode_app(s)) < 0)
2426  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2427  av_err2str(ret));
2428  /* Comment */
2429  } else if (start_code == COM) {
2430  ret = mjpeg_decode_com(s);
2431  if (ret < 0)
2432  return ret;
2433  } else if (start_code == DQT) {
2434  ret = ff_mjpeg_decode_dqt(s);
2435  if (ret < 0)
2436  return ret;
2437  }
2438 
2439  ret = -1;
2440 
2441  if (!CONFIG_JPEGLS_DECODER &&
2442  (start_code == SOF48 || start_code == LSE)) {
2443  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2444  return AVERROR(ENOSYS);
2445  }
2446 
2447  if (avctx->skip_frame == AVDISCARD_ALL) {
2448  switch(start_code) {
2449  case SOF0:
2450  case SOF1:
2451  case SOF2:
2452  case SOF3:
2453  case SOF48:
2454  break;
2455  default:
2456  goto skip;
2457  }
2458  }
2459 
2460  switch (start_code) {
2461  case SOI:
2462  s->restart_interval = 0;
2463  s->restart_count = 0;
2464  s->raw_image_buffer = buf_ptr;
2465  s->raw_image_buffer_size = buf_end - buf_ptr;
2466  /* nothing to do on SOI */
2467  break;
2468  case DHT:
2469  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2470  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2471  goto fail;
2472  }
2473  break;
2474  case SOF0:
2475  case SOF1:
2476  if (start_code == SOF0)
2477  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2478  else
2479  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2480  s->lossless = 0;
2481  s->ls = 0;
2482  s->progressive = 0;
2483  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2484  goto fail;
2485  break;
2486  case SOF2:
2487  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2488  s->lossless = 0;
2489  s->ls = 0;
2490  s->progressive = 1;
2491  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2492  goto fail;
2493  break;
2494  case SOF3:
2495  avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2496 #if FF_API_CODEC_PROPS
2497 FF_DISABLE_DEPRECATION_WARNINGS
2498  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2499 FF_ENABLE_DEPRECATION_WARNINGS
2500 #endif
2501  s->lossless = 1;
2502  s->ls = 0;
2503  s->progressive = 0;
2504  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2505  goto fail;
2506  break;
2507  case SOF48:
2508  avctx->profile = AV_PROFILE_MJPEG_JPEG_LS;
2509 #if FF_API_CODEC_PROPS
2510 FF_DISABLE_DEPRECATION_WARNINGS
2511  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2512 FF_ENABLE_DEPRECATION_WARNINGS
2513 #endif
2514  s->lossless = 1;
2515  s->ls = 1;
2516  s->progressive = 0;
2517  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2518  goto fail;
2519  break;
2520  case LSE:
2521  if (!CONFIG_JPEGLS_DECODER ||
2522  (ret = ff_jpegls_decode_lse(s)) < 0)
2523  goto fail;
2524  if (ret == 1)
2525  goto redo_for_pal8;
2526  break;
2527  case EOI:
2528 eoi_parser:
2529  if (!avctx->hwaccel &&
2530  s->progressive && s->cur_scan && s->got_picture)
2531  mjpeg_idct_scan_progressive_ac(s);
2532  s->cur_scan = 0;
2533  if (!s->got_picture) {
2534  av_log(avctx, AV_LOG_WARNING,
2535  "Found EOI before any SOF, ignoring\n");
2536  break;
2537  }
2538  if (s->interlaced) {
2539  s->bottom_field ^= 1;
2540  /* if not bottom field, do not output image yet */
2541  if (s->bottom_field == !s->interlace_polarity)
2542  break;
2543  }
2544  if (avctx->hwaccel) {
2545  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2546  if (ret < 0)
2547  return ret;
2548 
2549  av_freep(&s->hwaccel_picture_private);
2550  }
2551  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2552  return ret;
2553  if (s->lossless)
2554  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2555  *got_frame = 1;
2556  s->got_picture = 0;
2557 
2558  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2559  int qp = FFMAX3(s->qscale[0],
2560  s->qscale[1],
2561  s->qscale[2]);
2562 
2563  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2564  }
2565 
2566  goto the_end;
2567  case SOS:
2568  s->raw_scan_buffer = buf_ptr;
2569  s->raw_scan_buffer_size = buf_end - buf_ptr;
2570 
2571  s->cur_scan++;
2572 
2573  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2574  (avctx->err_recognition & AV_EF_EXPLODE))
2575  goto fail;
2576  break;
2577  case DRI:
2578  if ((ret = mjpeg_decode_dri(s)) < 0)
2579  return ret;
2580  break;
2581  case SOF5:
2582  case SOF6:
2583  case SOF7:
2584  case SOF9:
2585  case SOF10:
2586  case SOF11:
2587  case SOF13:
2588  case SOF14:
2589  case SOF15:
2590  case JPG:
2591  av_log(avctx, AV_LOG_ERROR,
2592  "mjpeg: unsupported coding type (%x)\n", start_code);
2593  break;
2594  }
2595 
2596  if (avctx->skip_frame == AVDISCARD_ALL) {
2597  switch(start_code) {
2598  case SOF0:
2599  case SOF1:
2600  case SOF2:
2601  case SOF3:
2602  case SOF48:
2603  s->got_picture = 0;
2604  goto the_end_no_picture;
2605  }
2606  }
2607 
2608 skip:
2609  /* done with this start code; skip the bytes consumed by the marker parser */
2610  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2611  av_log(avctx, AV_LOG_DEBUG,
2612  "marker parser used %d bytes (%d bits)\n",
2613  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2614  }
2615  if (s->got_picture && s->cur_scan) {
2616  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2617  goto eoi_parser;
2618  }
2619  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2620  return AVERROR_INVALIDDATA;
2621 fail:
2622  s->got_picture = 0;
2623  return ret;
2624 the_end:
2625 
2626  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2627 
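 /* Chroma planes may have been decoded at a lower horizontal/vertical resolution
    than the output pixel format provides (upscale_h/upscale_v are set while parsing
    SOF); recreate the missing samples by simple interpolation. */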
2628  if (AV_RB32(s->upscale_h)) {
2629  int p;
2630  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2631  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2632  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2633  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2634  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2635  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2636  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2637  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2638  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2639  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2640  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2641  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2642  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2643  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2644  );
2645  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2646  if (ret)
2647  return ret;
2648 
2649  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2650  for (p = 0; p<s->nb_components; p++) {
2651  uint8_t *line = s->picture_ptr->data[p];
2652  int w = s->width;
2653  int h = s->height;
2654  if (!s->upscale_h[p])
2655  continue;
2656  if (p==1 || p==2) {
2657  w = AV_CEIL_RSHIFT(w, hshift);
2658  h = AV_CEIL_RSHIFT(h, vshift);
2659  }
2660  if (s->upscale_v[p] == 1)
2661  h = (h+1)>>1;
2662  av_assert0(w > 0);
2663  for (int i = 0; i < h; i++) {
2664  if (s->upscale_h[p] == 1) {
2665  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2666  else line[w - 1] = line[(w - 1) / 2];
2667  for (index = w - 2; index > 0; index--) {
2668  if (is16bit)
2669  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2670  else
2671  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2672  }
2673  } else if (s->upscale_h[p] == 2) {
2674  if (is16bit) {
2675  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2676  if (w > 1)
2677  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2678  } else {
2679  line[w - 1] = line[(w - 1) / 3];
2680  if (w > 1)
2681  line[w - 2] = line[w - 1];
2682  }
2683  for (index = w - 3; index > 0; index--) {
2684  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2685  }
2686  } else if (s->upscale_h[p] == 4){
2687  if (is16bit) {
2688  uint16_t *line16 = (uint16_t *) line;
2689  line16[w - 1] = line16[(w - 1) >> 2];
2690  if (w > 1)
2691  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2692  if (w > 2)
2693  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2694  } else {
2695  line[w - 1] = line[(w - 1) >> 2];
2696  if (w > 1)
2697  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2698  if (w > 2)
2699  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2700  }
2701  for (index = w - 4; index > 0; index--)
2702  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2703  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2704  }
2705  line += s->linesize[p];
2706  }
2707  }
2708  }
2709  if (AV_RB32(s->upscale_v)) {
2710  int p;
2711  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2712  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2713  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2714  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2715  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2716  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2717  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2718  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2719  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2720  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2721  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2722  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2723  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2724  );
2725  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2726  if (ret)
2727  return ret;
2728 
2729  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2730  for (p = 0; p < s->nb_components; p++) {
2731  uint8_t *dst;
2732  int w = s->width;
2733  int h = s->height;
2734  if (!s->upscale_v[p])
2735  continue;
2736  if (p==1 || p==2) {
2737  w = AV_CEIL_RSHIFT(w, hshift);
2738  h = AV_CEIL_RSHIFT(h, vshift);
2739  }
2740  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2741  for (int i = h - 1; i; i--) {
2742  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2743  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2744  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2745  memcpy(dst, src1, w);
2746  } else {
2747  for (index = 0; index < w; index++)
2748  dst[index] = (src1[index] + src2[index]) >> 1;
2749  }
2750  dst -= s->linesize[p];
2751  }
2752  }
2753  }
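 /* Bottom-up sources (flagged via the COM marker, e.g. Intel JPEG Library output):
    flip vertically by pointing each plane at its last row and negating the stride. */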
2754  if (s->flipped && !s->rgb) {
2755  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2756  if (ret)
2757  return ret;
2758 
2759  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2760  for (index=0; index<s->nb_components; index++) {
2761  int h = frame->height;
2762  if (index && index < 3)
2763  h = AV_CEIL_RSHIFT(h, vshift);
2764  if (frame->data[index]) {
2765  frame->data[index] += (h - 1) * frame->linesize[index];
2766  frame->linesize[index] *= -1;
2767  }
2768  }
2769  }
2770 
2771  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2772  av_assert0(s->nb_components == 3);
2773  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2774  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2775  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2776  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2777  }
2778 
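 /* Post-process 4-component Adobe data: transform 0 is inverted CMYK (combine each
    colour plane with the K plane and force alpha opaque), transform 2 is YCCK
    (invert, combine with K, keep the 128 chroma offset). */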
2779  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2780  int w = s->picture_ptr->width;
2781  int h = s->picture_ptr->height;
2782  av_assert0(s->nb_components == 4);
2783  for (int i = 0; i < h; i++) {
2784  int j;
2785  uint8_t *dst[4];
2786  for (index=0; index<4; index++) {
2787  dst[index] = s->picture_ptr->data[index]
2788  + s->picture_ptr->linesize[index]*i;
2789  }
2790  for (j=0; j<w; j++) {
2791  int k = dst[3][j];
2792  int r = dst[0][j] * k;
2793  int g = dst[1][j] * k;
2794  int b = dst[2][j] * k;
2795  dst[0][j] = g*257 >> 16;
2796  dst[1][j] = b*257 >> 16;
2797  dst[2][j] = r*257 >> 16;
2798  }
2799  memset(dst[3], 255, w);
2800  }
2801  }
2802  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2803  int w = s->picture_ptr->width;
2804  int h = s->picture_ptr->height;
2805  av_assert0(s->nb_components == 4);
2806  for (int i = 0; i < h; i++) {
2807  int j;
2808  uint8_t *dst[4];
2809  for (index=0; index<4; index++) {
2810  dst[index] = s->picture_ptr->data[index]
2811  + s->picture_ptr->linesize[index]*i;
2812  }
2813  for (j=0; j<w; j++) {
2814  int k = dst[3][j];
2815  int r = (255 - dst[0][j]) * k;
2816  int g = (128 - dst[1][j]) * k;
2817  int b = (128 - dst[2][j]) * k;
2818  dst[0][j] = r*257 >> 16;
2819  dst[1][j] = (g*257 >> 16) + 128;
2820  dst[2][j] = (b*257 >> 16) + 128;
2821  }
2822  memset(dst[3], 255, w);
2823  }
2824  }
2825 
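 /* Export the stereoscopic layout parsed from the _JPS APP3 marker as frame side data. */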
2826  if (s->stereo3d) {
2827  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2828  if (stereo) {
2829  stereo->type = s->stereo3d->type;
2830  stereo->flags = s->stereo3d->flags;
2831  }
2832  av_freep(&s->stereo3d);
2833  }
2834 
2835  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2836  AVFrameSideData *sd;
2837  size_t offset = 0;
2838  int total_size = 0;
2839 
2840  /* Sum size of all parts. */
2841  for (int i = 0; i < s->iccnum; i++)
2842  total_size += s->iccentries[i].length;
2843 
2844  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2845  if (ret < 0) {
2846  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2847  return ret;
2848  }
2849 
2850  if (sd) {
2851  /* Reassemble the parts, which are now in-order. */
2852  for (int i = 0; i < s->iccnum; i++) {
2853  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2854  offset += s->iccentries[i].length;
2855  }
2856  }
2857  }
2858 
2859  if (s->exif_metadata.entries) {
2860  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2861  av_exif_free(&s->exif_metadata);
2862  if (ret < 0)
2863  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2864  }
2865 
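 /* Some AVRn/AVDJ streams code more rows than are meant to be shown; expose the
    full coded height and crop the extra rows at the top. */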
2866  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2867  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2868  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2869  avctx->coded_height > s->orig_height) {
2870  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2871  frame->crop_top = frame->height - avctx->height;
2872  }
2873 
2874 the_end_no_picture:
2875  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2876  buf_end - buf_ptr);
2877  return buf_ptr - buf;
2878 }
2879 
2880 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2881  AVPacket *avpkt)
2882 {
2883  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2884  avpkt, avpkt->data, avpkt->size);
2885 }
2886 
2887 
2888 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2889  * even without having called ff_mjpeg_decode_init(). */
2890 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2891 {
2892  MJpegDecodeContext *s = avctx->priv_data;
2893  int i, j;
2894 
2895  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2896  av_log(avctx, AV_LOG_INFO, "Single field\n");
2897  }
2898 
2899  av_frame_free(&s->picture);
2900  s->picture_ptr = NULL;
2901 
2902  av_frame_free(&s->smv_frame);
2903 
2904  av_freep(&s->buffer);
2905  av_freep(&s->stereo3d);
2906  av_freep(&s->ljpeg_buffer);
2907  s->ljpeg_buffer_size = 0;
2908 
2909  for (i = 0; i < 3; i++) {
2910  for (j = 0; j < 4; j++)
2911  ff_vlc_free(&s->vlcs[i][j]);
2912  }
2913  for (i = 0; i < MAX_COMPONENTS; i++) {
2914  av_freep(&s->blocks[i]);
2915  av_freep(&s->last_nnz[i]);
2916  }
2917  av_exif_free(&s->exif_metadata);
2918 
2919  reset_icc_profile(s);
2920 
2921  av_freep(&s->hwaccel_picture_private);
2922  av_freep(&s->jls_state);
2923 
2924  return 0;
2925 }
2926 
2927 static void decode_flush(AVCodecContext *avctx)
2928 {
2929  MJpegDecodeContext *s = avctx->priv_data;
2930  s->got_picture = 0;
2931 
2932  s->smv_next_frame = 0;
2933  av_frame_unref(s->smv_frame);
2934 }
2935 
2936 #if CONFIG_MJPEG_DECODER
2937 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2938 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2939 static const AVOption options[] = {
2940  { "extern_huff", "Use external huffman table.",
2941  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2942  { NULL },
2943 };
2944 
2945 static const AVClass mjpegdec_class = {
2946  .class_name = "MJPEG decoder",
2947  .item_name = av_default_item_name,
2948  .option = options,
2949  .version = LIBAVUTIL_VERSION_INT,
2950 };
2951 
2952 const FFCodec ff_mjpeg_decoder = {
2953  .p.name = "mjpeg",
2954  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2955  .p.type = AVMEDIA_TYPE_VIDEO,
2956  .p.id = AV_CODEC_ID_MJPEG,
2957  .priv_data_size = sizeof(MJpegDecodeContext),
2958  .init = ff_mjpeg_decode_init,
2959  .close = ff_mjpeg_decode_end,
2960  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2961  .flush = decode_flush,
2962  .p.capabilities = AV_CODEC_CAP_DR1,
2963  .p.max_lowres = 3,
2964  .p.priv_class = &mjpegdec_class,
2965  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2966  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2967  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM |
2968  FF_CODEC_CAP_ICC_PROFILES,
2969  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2970 #if CONFIG_MJPEG_NVDEC_HWACCEL
2971  HWACCEL_NVDEC(mjpeg),
2972 #endif
2973 #if CONFIG_MJPEG_VAAPI_HWACCEL
2974  HWACCEL_VAAPI(mjpeg),
2975 #endif
2976  NULL
2977  },
2978 };
2979 #endif
2980 #if CONFIG_THP_DECODER
2981 const FFCodec ff_thp_decoder = {
2982  .p.name = "thp",
2983  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
2984  .p.type = AVMEDIA_TYPE_VIDEO,
2985  .p.id = AV_CODEC_ID_THP,
2986  .priv_data_size = sizeof(MJpegDecodeContext),
2987  .init = ff_mjpeg_decode_init,
2988  .close = ff_mjpeg_decode_end,
2989  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2990  .flush = decode_flush,
2991  .p.capabilities = AV_CODEC_CAP_DR1,
2992  .p.max_lowres = 3,
2993  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2994 };
2995 #endif
2996 
2997 #if CONFIG_SMVJPEG_DECODER
2998 // SMV JPEG just stacks several output frames into one JPEG picture
2999 // we handle that by setting up the cropping parameters appropriately
3000 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
3001 {
3002  MJpegDecodeContext *s = avctx->priv_data;
3003 
3004  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
3005 
3006  frame->width = avctx->coded_width;
3007  frame->height = avctx->coded_height;
3008  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3009  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3010 
3011  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3012  s->smv_frame->pts += s->smv_frame->duration;
3013  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3014 
3015  if (s->smv_next_frame == 0)
3016  av_frame_unref(s->smv_frame);
3017 }
3018 
3019 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3020 {
3021  MJpegDecodeContext *s = avctx->priv_data;
3022  AVPacket *const pkt = avctx->internal->in_pkt;
3023  int got_frame = 0;
3024  int ret;
3025 
3026  if (s->smv_next_frame > 0)
3027  goto return_frame;
3028 
3029  ret = ff_decode_get_packet(avctx, pkt);
3030  if (ret < 0)
3031  return ret;
3032 
3033  av_frame_unref(s->smv_frame);
3034 
3035  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3036  s->smv_frame->pkt_dts = pkt->dts;
3037  av_packet_unref(pkt);
3038  if (ret < 0)
3039  return ret;
3040 
3041  if (!got_frame)
3042  return AVERROR(EAGAIN);
3043 
3044  // packet duration covers all the frames in the packet
3045  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3046 
3047 return_frame:
3048  av_assert0(s->smv_frame->buf[0]);
3049  ret = av_frame_ref(frame, s->smv_frame);
3050  if (ret < 0)
3051  return ret;
3052 
3053  smv_process_frame(avctx, frame);
3054  return 0;
3055 }
3056 
3057 const FFCodec ff_smvjpeg_decoder = {
3058  .p.name = "smvjpeg",
3059  CODEC_LONG_NAME("SMV JPEG"),
3060  .p.type = AVMEDIA_TYPE_VIDEO,
3061  .p.id = AV_CODEC_ID_SMVJPEG,
3062  .priv_data_size = sizeof(MJpegDecodeContext),
3063  .init = ff_mjpeg_decode_init,
3064  .close = ff_mjpeg_decode_end,
3065  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3066  .flush = decode_flush,
3067  .p.capabilities = AV_CODEC_CAP_DR1,
3068  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3070 };
3071 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:433
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:276
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:245
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:493
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:689
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:764
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:659
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1203
out
FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1428
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2927
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3447
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:984
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1398
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:568
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:419
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:250
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:114
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
AVFrame::width
int width
Definition: frame.h:499
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:767
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:558
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:682
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:819
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:209
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:512
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1375
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:247
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3487
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:696
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:240
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1274
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1444
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:123
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1662
fail
#define fail()
Definition: checkasm.h:200
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2363
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2164
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:60
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:609
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3475
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
aligned
static int aligned(int val)
Definition: dashdec.c:171
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:885
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1638
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1067
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:185
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:346
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:103
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2349
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2890
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:331
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2410
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
exif_internal.h
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1635
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:197
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:241
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:231
MJpegDecodeContext
Definition: mjpegdec.h:55
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1459
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:1002
lowres
static int lowres
Definition: ffplay.c:330
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1580
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:646
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1697
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1720
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1102
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:559
dc
Token cross-referenced to the Snow bitstream description (intra DC prediction) in doc/snow.txt.
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
int16_t *dst (function parameter)
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2880
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:903
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1670
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2114
AV_RB32
Bytestream macro: read a 32-bit big-endian value.
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio frames.
Definition: frame.h:514
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:174
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:557
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
Token cross-referenced to the AVOption offset field discussion in doc/writing_filters.txt.
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:290
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2204
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Token cross-referenced to the format negotiation discussion (pixel formats for video, channel layouts for audio) in doc/filter_design.txt.
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:177
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2315
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:837
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Token cross-referenced to the activate/FFERROR_NOT_READY example code in doc/filter_design.txt.
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:369
VD
#define VD
Definition: amfdec.c:671
src2
const pixel * src2
Definition: h264pred_template.c:421
display.h
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1845
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused by decoders implementing the receive_frame API.
Definition: internal.h:83
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:167
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc, but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE bytes at the end which will always be 0.
Definition: utils.c:53
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1382
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:676
len
int len
Definition: vorbis_enc_data.h:426
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:750
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1878
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:976
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:81
frame
Token cross-referenced to the request_frame discussion in doc/filter_design.txt.
Definition: filter_design.txt:265
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:555
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1379
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:367
left
Token cross-referenced to the Snow motion vector prediction description (median of the scaled left, top and top-right vectors) in doc/snow.txt.
Definition: snow.txt:386
AV_RL32
Bytestream macro: read a 32-bit little-endian value.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2229
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
Macro giving the offset of an AVOption-backed field within the private context structure; cross-referenced to doc/writing_filters.txt.
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:499
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:354
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:243
buffer
Token cross-referenced to the frame and frame-reference (shared AVFrame buffer) discussion in doc/filter_design.txt.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:789
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1618
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
Token cross-referenced to the request_frame return values discussion in doc/filter_design.txt.
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1374
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:301
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:607
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1857
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVPacket
This structure stores compressed data.
Definition: packet.h:535
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
bytestream.h
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1637
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:46
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
Definition: stereo3d.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
Bytestream macro: read a 24-bit big-endian value.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347