mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "config_components.h"
34 
35 #include "libavutil/attributes.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/mem.h"
39 #include "libavutil/opt.h"
40 #include "avcodec.h"
41 #include "blockdsp.h"
42 #include "codec_internal.h"
43 #include "copy_block.h"
44 #include "decode.h"
45 #include "exif.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "idctdsp.h"
49 #include "internal.h"
50 #include "jpegtables.h"
51 #include "mjpeg.h"
52 #include "mjpegdec.h"
53 #include "jpeglsdec.h"
54 #include "profiles.h"
55 #include "put_bits.h"
56 
57 
58 static int init_default_huffman_tables(MJpegDecodeContext *s)
59 {
60  static const struct {
61  int class;
62  int index;
63  const uint8_t *bits;
64  const uint8_t *values;
65  int length;
66  } ht[] = {
67  { 0, 0, ff_mjpeg_bits_dc_luminance,
68  ff_mjpeg_val_dc, 12 },
69  { 0, 1, ff_mjpeg_bits_dc_chrominance,
70  ff_mjpeg_val_dc, 12 },
71  { 1, 0, ff_mjpeg_bits_ac_luminance,
72  ff_mjpeg_val_ac_luminance, 162 },
73  { 1, 1, ff_mjpeg_bits_ac_chrominance,
74  ff_mjpeg_val_ac_chrominance, 162 },
75  { 2, 0, ff_mjpeg_bits_ac_luminance,
76  ff_mjpeg_val_ac_luminance, 162 },
77  { 2, 1, ff_mjpeg_bits_ac_chrominance,
78  ff_mjpeg_val_ac_chrominance, 162 },
79  };
80  int i, ret;
81 
82  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
83  ff_vlc_free(&s->vlcs[ht[i].class][ht[i].index]);
84  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
85  ht[i].bits, ht[i].values,
86  ht[i].class == 1, s->avctx);
87  if (ret < 0)
88  return ret;
89 
90  if (ht[i].class < 2) {
91  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
92  ht[i].bits + 1, 16);
93  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
94  ht[i].values, ht[i].length);
95  }
96  }
97 
98  return 0;
99 }
100 
101 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
102 {
103  s->buggy_avid = 1;
104  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
105  s->interlace_polarity = 1;
106  if (len > 14 && buf[12] == 2) /* 2 - PAL */
107  s->interlace_polarity = 0;
108  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
109  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
110 }
111 
112 static void init_idct(AVCodecContext *avctx)
113 {
114  MJpegDecodeContext *s = avctx->priv_data;
115 
116  ff_idctdsp_init(&s->idsp, avctx);
117  ff_permute_scantable(s->permutated_scantable, ff_zigzag_direct,
118  s->idsp.idct_permutation);
119 }
120 
121 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
122 {
123  MJpegDecodeContext *s = avctx->priv_data;
124  int ret;
125 
126  if (!s->picture_ptr) {
127  s->picture = av_frame_alloc();
128  if (!s->picture)
129  return AVERROR(ENOMEM);
130  s->picture_ptr = s->picture;
131  }
132 
133  s->avctx = avctx;
134  ff_blockdsp_init(&s->bdsp);
135  init_idct(avctx);
136  s->buffer_size = 0;
137  s->buffer = NULL;
138  s->start_code = -1;
139  s->first_picture = 1;
140  s->got_picture = 0;
141  s->orig_height = avctx->coded_height;
142  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
143  avctx->colorspace = AVCOL_SPC_BT470BG;
144  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
145 
146  if ((ret = init_default_huffman_tables(s)) < 0)
147  return ret;
148 
149  if (s->extern_huff) {
150  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
151  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
152  return ret;
153  if (ff_mjpeg_decode_dht(s)) {
154  av_log(avctx, AV_LOG_ERROR,
155  "error using external huffman table, switching back to internal\n");
156  if ((ret = init_default_huffman_tables(s)) < 0)
157  return ret;
158  }
159  }
160  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
161  s->interlace_polarity = 1; /* bottom field first */
162  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
163  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
164  if (avctx->codec_tag == AV_RL32("MJPG"))
165  s->interlace_polarity = 1;
166  }
167 
168  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
169  if (avctx->extradata_size >= 4)
170  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
171 
172  if (s->smv_frames_per_jpeg <= 0) {
173  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
174  return AVERROR_INVALIDDATA;
175  }
176 
177  s->smv_frame = av_frame_alloc();
178  if (!s->smv_frame)
179  return AVERROR(ENOMEM);
180  } else if (avctx->extradata_size > 8
181  && AV_RL32(avctx->extradata) == 0x2C
182  && AV_RL32(avctx->extradata+4) == 0x18) {
183  parse_avid(s, avctx->extradata, avctx->extradata_size);
184  }
185 
186  if (avctx->codec->id == AV_CODEC_ID_AMV)
187  s->flipped = 1;
188 
189  return 0;
190 }
191 
192 
193 /* quantize tables */
194 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
195 {
196  int len, index, i;
197 
198  len = get_bits(&s->gb, 16) - 2;
199 
200  if (8*len > get_bits_left(&s->gb)) {
201  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
202  return AVERROR_INVALIDDATA;
203  }
204 
205  while (len >= 65) {
206  int pr = get_bits(&s->gb, 4);
207  if (pr > 1) {
208  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
209  return AVERROR_INVALIDDATA;
210  }
211  index = get_bits(&s->gb, 4);
212  if (index >= 4)
213  return AVERROR_INVALIDDATA;
214  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
215  /* read quant table */
216  for (i = 0; i < 64; i++) {
217  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
218  if (s->quant_matrixes[index][i] == 0) {
219  int log_level = s->avctx->err_recognition & AV_EF_EXPLODE ? AV_LOG_ERROR : AV_LOG_WARNING;
220  av_log(s->avctx, log_level, "dqt: 0 quant value\n");
221  if (s->avctx->err_recognition & AV_EF_EXPLODE)
222  return AVERROR_INVALIDDATA;
223  }
224  }
225 
226  // XXX FIXME fine-tune, and perhaps add dc too
227  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
228  s->quant_matrixes[index][8]) >> 1;
229  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
230  index, s->qscale[index]);
231  len -= 1 + 64 * (1+pr);
232  }
233  return 0;
234 }
235 
236 /* decode huffman tables and build VLC decoders */
237 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
238 {
239  int len, index, i, class, n, v;
240  uint8_t bits_table[17];
241  uint8_t val_table[256];
242  int ret = 0;
243 
244  len = get_bits(&s->gb, 16) - 2;
245 
246  if (8*len > get_bits_left(&s->gb)) {
247  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
248  return AVERROR_INVALIDDATA;
249  }
250 
251  while (len > 0) {
252  if (len < 17)
253  return AVERROR_INVALIDDATA;
254  class = get_bits(&s->gb, 4);
255  if (class >= 2)
256  return AVERROR_INVALIDDATA;
257  index = get_bits(&s->gb, 4);
258  if (index >= 4)
259  return AVERROR_INVALIDDATA;
260  n = 0;
261  for (i = 1; i <= 16; i++) {
262  bits_table[i] = get_bits(&s->gb, 8);
263  n += bits_table[i];
264  }
265  len -= 17;
266  if (len < n || n > 256)
267  return AVERROR_INVALIDDATA;
268 
269  for (i = 0; i < n; i++) {
270  v = get_bits(&s->gb, 8);
271  val_table[i] = v;
272  }
273  len -= n;
274 
275  /* build VLC and flush previous vlc if present */
276  ff_vlc_free(&s->vlcs[class][index]);
277  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
278  class, index, n);
279  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
280  val_table, class > 0, s->avctx)) < 0)
281  return ret;
282 
283  if (class > 0) {
284  ff_vlc_free(&s->vlcs[2][index]);
285  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
286  val_table, 0, s->avctx)) < 0)
287  return ret;
288  }
289 
290  for (i = 0; i < 16; i++)
291  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
292  for (i = 0; i < 256; i++)
293  s->raw_huffman_values[class][index][i] = val_table[i];
294  }
295  return 0;
296 }
297 
298 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
299 {
300  int len, nb_components, i, width, height, bits, ret, size_change;
301  unsigned pix_fmt_id;
302  int h_count[MAX_COMPONENTS] = { 0 };
303  int v_count[MAX_COMPONENTS] = { 0 };
304 
305  s->cur_scan = 0;
306  memset(s->upscale_h, 0, sizeof(s->upscale_h));
307  memset(s->upscale_v, 0, sizeof(s->upscale_v));
308 
309  len = get_bits(&s->gb, 16);
310  bits = get_bits(&s->gb, 8);
311 
312  if (bits > 16 || bits < 1) {
313  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
314  return AVERROR_INVALIDDATA;
315  }
316 
317  if (s->avctx->bits_per_raw_sample != bits) {
318  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
319  s->avctx->bits_per_raw_sample = bits;
320  init_idct(s->avctx);
321  }
322  if (s->pegasus_rct)
323  bits = 9;
324  if (bits == 9 && !s->pegasus_rct)
325  s->rct = 1; // FIXME ugly
326 
327  if(s->lossless && s->avctx->lowres){
328  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
329  return AVERROR(ENOSYS);
330  }
331 
332  height = get_bits(&s->gb, 16);
333  width = get_bits(&s->gb, 16);
334 
335  // HACK for odd_height.mov
336  if (s->interlaced && s->width == width && s->height == height + 1)
337  height= s->height;
338 
339  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
340  if (av_image_check_size(width, height, 0, s->avctx) < 0)
341  return AVERROR_INVALIDDATA;
342 
343  if (!s->progressive && !s->ls) {
344  // A valid frame requires at least 1 bit for DC + 1 bit for AC for each 8x8 block.
345  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
346  return AVERROR_INVALIDDATA;
347  }
348 
349  nb_components = get_bits(&s->gb, 8);
350  if (nb_components <= 0 ||
351  nb_components > MAX_COMPONENTS)
352  return AVERROR_INVALIDDATA;
353  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
354  if (nb_components != s->nb_components) {
355  av_log(s->avctx, AV_LOG_ERROR,
356  "nb_components changing in interlaced picture\n");
357  return AVERROR_INVALIDDATA;
358  }
359  }
360  if (s->ls && !(bits <= 8 || nb_components == 1)) {
361  avpriv_report_missing_feature(s->avctx,
362  "JPEG-LS that is not <= 8 "
363  "bits/component or 16-bit gray");
364  return AVERROR_PATCHWELCOME;
365  }
366  if (len != 8 + 3 * nb_components) {
367  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
368  return AVERROR_INVALIDDATA;
369  }
370 
371  s->nb_components = nb_components;
372  s->h_max = 1;
373  s->v_max = 1;
374  for (i = 0; i < nb_components; i++) {
375  /* component id */
376  s->component_id[i] = get_bits(&s->gb, 8);
377  h_count[i] = get_bits(&s->gb, 4);
378  v_count[i] = get_bits(&s->gb, 4);
379  /* compute hmax and vmax (only used in interleaved case) */
380  if (h_count[i] > s->h_max)
381  s->h_max = h_count[i];
382  if (v_count[i] > s->v_max)
383  s->v_max = v_count[i];
384  s->quant_index[i] = get_bits(&s->gb, 8);
385  if (s->quant_index[i] >= 4) {
386  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
387  return AVERROR_INVALIDDATA;
388  }
389  if (!h_count[i] || !v_count[i]) {
390  av_log(s->avctx, AV_LOG_ERROR,
391  "Invalid sampling factor in component %d %d:%d\n",
392  i, h_count[i], v_count[i]);
393  return AVERROR_INVALIDDATA;
394  }
395 
396  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
397  i, h_count[i], v_count[i],
398  s->component_id[i], s->quant_index[i]);
399  }
400  if ( nb_components == 4
401  && s->component_id[0] == 'C'
402  && s->component_id[1] == 'M'
403  && s->component_id[2] == 'Y'
404  && s->component_id[3] == 'K')
405  s->adobe_transform = 0;
406 
407  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
408  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
409  return AVERROR_PATCHWELCOME;
410  }
411 
412  if (s->bayer) {
413  if (nb_components == 2) {
414  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
415  width stored in their SOF3 markers is the width of each one. We only output
416  a single component, therefore we need to adjust the output image width. We
417  handle the deinterleaving (but not the debayering) in this file. */
418  width *= 2;
419  }
420  /* They can also contain 1 component, which is double the width and half the height
421  of the final image (rows are interleaved). We don't handle the decoding in this
422  file, but leave that to the TIFF/DNG decoder. */
423  }
424 
425  /* if different size, realloc/alloc picture */
426  if (width != s->width || height != s->height || bits != s->bits ||
427  memcmp(s->h_count, h_count, sizeof(h_count)) ||
428  memcmp(s->v_count, v_count, sizeof(v_count))) {
429  size_change = 1;
430 
431  s->width = width;
432  s->height = height;
433  s->bits = bits;
434  memcpy(s->h_count, h_count, sizeof(h_count));
435  memcpy(s->v_count, v_count, sizeof(v_count));
436  s->interlaced = 0;
437  s->got_picture = 0;
438 
439  /* test interlaced mode */
440  if (s->first_picture &&
441  (s->multiscope != 2 || s->avctx->pkt_timebase.den >= 25 * s->avctx->pkt_timebase.num) &&
442  s->orig_height != 0 &&
443  s->height < ((s->orig_height * 3) / 4)) {
444  s->interlaced = 1;
445  s->bottom_field = s->interlace_polarity;
446  s->picture_ptr->flags |= AV_FRAME_FLAG_INTERLACED;
447  s->picture_ptr->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !s->interlace_polarity;
448  height *= 2;
449  }
450 
451  ret = ff_set_dimensions(s->avctx, width, height);
452  if (ret < 0)
453  return ret;
454 
455  if (s->avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
456  (s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
457  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
458  s->orig_height < height)
459  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
460 
461  s->first_picture = 0;
462  } else {
463  size_change = 0;
464  }
465 
466  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
467  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
468  if (s->avctx->height <= 0)
469  return AVERROR_INVALIDDATA;
470  }
471  if (s->bayer && s->progressive) {
472  avpriv_request_sample(s->avctx, "progressively coded bayer picture");
473  return AVERROR_INVALIDDATA;
474  }
475 
476  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
477  if (s->progressive) {
478  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
479  return AVERROR_INVALIDDATA;
480  }
481  } else {
482  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
483  s->rgb = 1;
484  else if (!s->lossless)
485  s->rgb = 0;
486  /* XXX: not complete test ! */
487  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
488  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
489  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
490  (s->h_count[3] << 4) | s->v_count[3];
491  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
492  /* NOTE we do not allocate pictures large enough for the possible
493  * padding of h/v_count being 4 */
494  if (!(pix_fmt_id & 0xD0D0D0D0))
495  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
496  if (!(pix_fmt_id & 0x0D0D0D0D))
497  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
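 /* For example, a 4:2:0 stream with sampling factors 2x2 (Y), 1x1 (Cb) and
  * 1x1 (Cr) packs to pix_fmt_id 0x22111100, which the switch below maps to
  * YUV(J)420P. The two subtractions above only fold uniformly oversampled
  * combinations (e.g. 2x2,2x2,2x2) down to their canonical form (1x1,1x1,1x1). */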
498 
499  for (i = 0; i < 8; i++) {
500  int j = 6 + (i&1) - (i&6);
501  int is = (pix_fmt_id >> (4*i)) & 0xF;
502  int js = (pix_fmt_id >> (4*j)) & 0xF;
503 
504  if (is == 1 && js != 2 && (i < 2 || i > 5))
505  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
506  if (is == 1 && js != 2 && (i < 2 || i > 5))
507  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
508 
509  if (is == 1 && js == 2) {
510  if (i & 1) s->upscale_h[j/2] = 1;
511  else s->upscale_v[j/2] = 1;
512  }
513  }
514 
515  if (s->bayer) {
516  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
517  goto unk_pixfmt;
518  }
519 
520  switch (pix_fmt_id) {
521  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
522  if (!s->bayer)
523  goto unk_pixfmt;
524  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
525  break;
526  case 0x11111100:
527  if (s->rgb)
528  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
529  else {
530  if ( s->adobe_transform == 0
531  || s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
532  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
533  } else {
534  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
535  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
536  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
537  }
538  }
539  av_assert0(s->nb_components == 3);
540  break;
541  case 0x11111111:
542  if (s->rgb)
543  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
544  else {
545  if (s->adobe_transform == 0 && s->bits <= 8) {
546  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
547  } else {
548  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
549  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
550  }
551  }
552  av_assert0(s->nb_components == 4);
553  break;
554  case 0x11412100:
555  if (s->bits > 8)
556  goto unk_pixfmt;
557  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
558  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
559  s->upscale_h[0] = 4;
560  s->upscale_h[1] = 0;
561  s->upscale_h[2] = 1;
562  } else {
563  goto unk_pixfmt;
564  }
565  break;
566  case 0x22111122:
567  case 0x22111111:
568  if (s->adobe_transform == 0 && s->bits <= 8) {
569  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
570  s->upscale_v[1] = s->upscale_v[2] = 1;
571  s->upscale_h[1] = s->upscale_h[2] = 1;
572  } else if (s->adobe_transform == 2 && s->bits <= 8) {
573  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
574  s->upscale_v[1] = s->upscale_v[2] = 1;
575  s->upscale_h[1] = s->upscale_h[2] = 1;
576  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
577  } else {
578  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
579  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
580  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
581  }
582  av_assert0(s->nb_components == 4);
583  break;
584  case 0x12121100:
585  case 0x22122100:
586  case 0x21211100:
587  case 0x21112100:
588  case 0x22211200:
589  case 0x22221100:
590  case 0x22112200:
591  case 0x11222200:
592  if (s->bits > 8)
593  goto unk_pixfmt;
594  if (s->adobe_transform == 0 || s->component_id[0] == 'R' &&
595  s->component_id[1] == 'G' && s->component_id[2] == 'B') {
596  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
597  } else {
598  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
599  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
600  }
601  break;
602  case 0x11000000:
603  case 0x13000000:
604  case 0x14000000:
605  case 0x31000000:
606  case 0x33000000:
607  case 0x34000000:
608  case 0x41000000:
609  case 0x43000000:
610  case 0x44000000:
611  if(s->bits <= 8)
612  s->avctx->pix_fmt = s->force_pal8 ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
613  else
614  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
615  break;
616  case 0x12111100:
617  case 0x14121200:
618  case 0x14111100:
619  case 0x22211100:
620  case 0x22112100:
621  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
622  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
623  else
624  goto unk_pixfmt;
625  s->upscale_v[1] = s->upscale_v[2] = 1;
626  } else {
627  if (pix_fmt_id == 0x14111100)
628  s->upscale_v[1] = s->upscale_v[2] = 1;
629  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
630  else
631  goto unk_pixfmt;
632  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
633  }
634  break;
635  case 0x21111100:
636  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B') {
637  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
638  else
639  goto unk_pixfmt;
640  s->upscale_h[1] = s->upscale_h[2] = 1;
641  } else {
642  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
643  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
644  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
645  }
646  break;
647  case 0x11311100:
648  if (s->bits > 8)
649  goto unk_pixfmt;
650  if (s->component_id[0] == 'R' && s->component_id[1] == 'G' && s->component_id[2] == 'B')
651  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
652  else
653  goto unk_pixfmt;
654  s->upscale_h[0] = s->upscale_h[2] = 2;
655  break;
656  case 0x31111100:
657  if (s->bits > 8)
658  goto unk_pixfmt;
659  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
660  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
661  s->upscale_h[1] = s->upscale_h[2] = 2;
662  break;
663  case 0x22121100:
664  case 0x22111200:
665  case 0x41211100:
666  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
667  else
668  goto unk_pixfmt;
669  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
670  break;
671  case 0x22111100:
672  case 0x23111100:
673  case 0x42111100:
674  case 0x24111100:
675  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
676  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
677  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
678  if (pix_fmt_id == 0x42111100) {
679  if (s->bits > 8)
680  goto unk_pixfmt;
681  s->upscale_h[1] = s->upscale_h[2] = 1;
682  } else if (pix_fmt_id == 0x24111100) {
683  if (s->bits > 8)
684  goto unk_pixfmt;
685  s->upscale_v[1] = s->upscale_v[2] = 1;
686  } else if (pix_fmt_id == 0x23111100) {
687  if (s->bits > 8)
688  goto unk_pixfmt;
689  s->upscale_v[1] = s->upscale_v[2] = 2;
690  }
691  break;
692  case 0x41111100:
693  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
694  else
695  goto unk_pixfmt;
696  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
697  break;
698  default:
699  unk_pixfmt:
700  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
701  memset(s->upscale_h, 0, sizeof(s->upscale_h));
702  memset(s->upscale_v, 0, sizeof(s->upscale_v));
703  return AVERROR_PATCHWELCOME;
704  }
705  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
706  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
707  return AVERROR_PATCHWELCOME;
708  }
709  if (s->ls) {
710  memset(s->upscale_h, 0, sizeof(s->upscale_h));
711  memset(s->upscale_v, 0, sizeof(s->upscale_v));
712  if (s->nb_components == 3) {
713  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
714  } else if (s->nb_components != 1) {
715  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
716  return AVERROR_PATCHWELCOME;
717  } else if ((s->palette_index || s->force_pal8) && s->bits <= 8)
718  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
719  else if (s->bits <= 8)
720  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
721  else
722  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
723  }
724 
725  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
726  if (!s->pix_desc) {
727  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
728  return AVERROR_BUG;
729  }
730 
731  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
732  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
733  } else {
734  enum AVPixelFormat pix_fmts[] = {
735 #if CONFIG_MJPEG_NVDEC_HWACCEL
736  AV_PIX_FMT_CUDA,
737 #endif
738 #if CONFIG_MJPEG_VAAPI_HWACCEL
739  AV_PIX_FMT_VAAPI,
740 #endif
741  s->avctx->pix_fmt,
742  AV_PIX_FMT_NONE,
743  };
744  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
745  if (s->hwaccel_pix_fmt < 0)
746  return AVERROR(EINVAL);
747 
748  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
749  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
750  }
751 
752  if (s->avctx->skip_frame == AVDISCARD_ALL) {
753  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
754  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
755  s->got_picture = 1;
756  return 0;
757  }
758 
759  av_frame_unref(s->picture_ptr);
760  ret = ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF);
761  if (ret < 0)
762  return ret;
763  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
764  s->picture_ptr->flags |= AV_FRAME_FLAG_KEY;
765  s->got_picture = 1;
766 
767  // Let's clear the palette to avoid leaving uninitialized values in it
768  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
769  memset(s->picture_ptr->data[1], 0, 1024);
770 
771  for (i = 0; i < 4; i++)
772  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
773 
774  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
775  s->width, s->height, s->linesize[0], s->linesize[1],
776  s->interlaced, s->avctx->height);
777 
778  }
779 
780  if ((s->rgb && !s->lossless && !s->ls) ||
781  (!s->rgb && s->ls && s->nb_components > 1) ||
782  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
783  ) {
784  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
785  return AVERROR_PATCHWELCOME;
786  }
787 
788  /* totally blank picture as progressive JPEG will only add details to it */
789  if (s->progressive) {
790  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
791  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
792  for (i = 0; i < s->nb_components; i++) {
793  int size = bw * bh * s->h_count[i] * s->v_count[i];
794  av_freep(&s->blocks[i]);
795  av_freep(&s->last_nnz[i]);
796  s->blocks[i] = av_calloc(size, sizeof(**s->blocks));
797  s->last_nnz[i] = av_calloc(size, sizeof(**s->last_nnz));
798  if (!s->blocks[i] || !s->last_nnz[i])
799  return AVERROR(ENOMEM);
800  s->block_stride[i] = bw * s->h_count[i];
801  }
802  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
803  }
804 
805  if (s->avctx->hwaccel) {
806  const FFHWAccel *hwaccel = ffhwaccel(s->avctx->hwaccel);
807  s->hwaccel_picture_private =
808  av_mallocz(hwaccel->frame_priv_data_size);
809  if (!s->hwaccel_picture_private)
810  return AVERROR(ENOMEM);
811 
812  ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
813  s->raw_image_buffer_size);
814  if (ret < 0)
815  return ret;
816  }
817 
818  return 0;
819 }
820 
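/* DC coefficients are coded as a Huffman magnitude category (the number of
 * additional bits, at most 16 here) followed by that many raw bits;
 * get_xbits() maps those bits back to the signed DC difference as defined by
 * the JPEG specification. A category of 0 means a zero difference. */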
821 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
822 {
823  int code;
824  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
825  if (code < 0 || code > 16) {
826  av_log(s->avctx, AV_LOG_ERROR,
827  "mjpeg_decode_dc: bad vlc: %d\n", dc_index);
828  return AVERROR_INVALIDDATA;
829  }
830 
831  *val = code ? get_xbits(&s->gb, code) : 0;
832  return 0;
833 }
834 
835 /* decode block and dequantize */
836 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
837  int dc_index, int ac_index, uint16_t *quant_matrix)
838 {
839  int code, i, j, level, val;
840 
841  /* DC coef */
842  int ret = mjpeg_decode_dc(s, dc_index, &val);
843  if (ret < 0)
844  return ret;
845 
846  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
847  s->last_dc[component] = val;
848  block[0] = av_clip_int16(val);
849  /* AC coefs */
850  i = 0;
851  {OPEN_READER(re, &s->gb);
852  do {
853  UPDATE_CACHE(re, &s->gb);
854  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
855 
856  i += ((unsigned)code) >> 4;
857  code &= 0xf;
858  if (code) {
859  // GET_VLC updates the cache if parsing reaches the second stage.
860  // So we have at least MIN_CACHE_BITS - 9 > 15 bits left here
861  // and don't need to refill the cache.
862  {
863  int cache = GET_CACHE(re, &s->gb);
864  int sign = (~cache) >> 31;
865  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
866  }
867 
868  LAST_SKIP_BITS(re, &s->gb, code);
869 
870  if (i > 63) {
871  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
872  return AVERROR_INVALIDDATA;
873  }
874  j = s->permutated_scantable[i];
875  block[j] = level * quant_matrix[i];
876  }
877  } while (i < 63);
878  CLOSE_READER(re, &s->gb);}
879 
880  return 0;
881 }
882 
883 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
884  int component, int dc_index,
885  uint16_t *quant_matrix, int Al)
886 {
887  unsigned val;
888  s->bdsp.clear_block(block);
889  int ret = mjpeg_decode_dc(s, dc_index, &val);
890  if (ret < 0)
891  return ret;
892 
893  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
894  s->last_dc[component] = val;
895  block[0] = val;
896  return 0;
897 }
898 
899 /* decode block and dequantize - progressive JPEG version */
900 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
901  uint8_t *last_nnz, int ac_index,
902  uint16_t *quant_matrix,
903  int ss, int se, int Al, int *EOBRUN)
904 {
905  int code, i, j, val, run;
906  unsigned level;
907 
908  if (*EOBRUN) {
909  (*EOBRUN)--;
910  return 0;
911  }
912 
913  {
914  OPEN_READER(re, &s->gb);
915  for (i = ss; ; i++) {
916  UPDATE_CACHE(re, &s->gb);
917  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
918 
919  run = ((unsigned) code) >> 4;
920  code &= 0xF;
921  if (code) {
922  i += run;
923 
924  {
925  int cache = GET_CACHE(re, &s->gb);
926  int sign = (~cache) >> 31;
927  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
928  }
929 
930  LAST_SKIP_BITS(re, &s->gb, code);
931 
932  if (i >= se) {
933  if (i == se) {
934  j = s->permutated_scantable[se];
935  block[j] = level * (quant_matrix[se] << Al);
936  break;
937  }
938  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
939  return AVERROR_INVALIDDATA;
940  }
941  j = s->permutated_scantable[i];
942  block[j] = level * (quant_matrix[i] << Al);
943  } else {
944  if (run == 0xF) {// ZRL - skip 15 coefficients
945  i += 15;
946  if (i >= se) {
947  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
948  return AVERROR_INVALIDDATA;
949  }
950  } else {
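 /* End-of-band run: an AC code with a zero low nibble and run R in 0..14
  * signals an EOB run of (1 << R) blocks plus the value of R extra bits,
  * counting this one; the remaining count is carried across blocks
  * in *EOBRUN. */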
951  val = (1 << run);
952  if (run) {
953  // Given that GET_VLC reloads internally, we always
954  // have at least 16 bits in the cache here.
955  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
956  LAST_SKIP_BITS(re, &s->gb, run);
957  }
958  *EOBRUN = val - 1;
959  break;
960  }
961  }
962  }
963  CLOSE_READER(re, &s->gb);
964  }
965 
966  if (i > *last_nnz)
967  *last_nnz = i;
968 
969  return 0;
970 }
971 
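/* Successive-approximation refinement: each coefficient that is already
 * nonzero receives one correction bit per refinement scan which, when set,
 * moves its magnitude away from zero by quant_matrix[i] << Al. REFINE_BIT and
 * ZERO_RUN below implement this for decode_block_refinement(). */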
972 #define REFINE_BIT(j) { \
973  UPDATE_CACHE(re, &s->gb); \
974  sign = block[j] >> 15; \
975  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
976  ((quant_matrix[i] ^ sign) - sign) << Al; \
977  LAST_SKIP_BITS(re, &s->gb, 1); \
978 }
979 
980 #define ZERO_RUN \
981 for (; ; i++) { \
982  if (i > last) { \
983  i += run; \
984  if (i > se) { \
985  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
986  return -1; \
987  } \
988  break; \
989  } \
990  j = s->permutated_scantable[i]; \
991  if (block[j]) \
992  REFINE_BIT(j) \
993  else if (run-- == 0) \
994  break; \
995 }
996 
997 /* decode block and dequantize - progressive JPEG refinement pass */
998 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
999  uint8_t *last_nnz,
1000  int ac_index, uint16_t *quant_matrix,
1001  int ss, int se, int Al, int *EOBRUN)
1002 {
1003  int code, i = ss, j, sign, val, run;
1004  int last = FFMIN(se, *last_nnz);
1005 
1006  OPEN_READER(re, &s->gb);
1007  if (*EOBRUN) {
1008  (*EOBRUN)--;
1009  } else {
1010  for (; ; i++) {
1011  UPDATE_CACHE(re, &s->gb);
1012  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
1013 
1014  if (code & 0xF) {
1015  run = ((unsigned) code) >> 4;
1016  val = SHOW_UBITS(re, &s->gb, 1);
1017  LAST_SKIP_BITS(re, &s->gb, 1);
1018  ZERO_RUN;
1019  j = s->permutated_scantable[i];
1020  val--;
1021  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
1022  if (i == se) {
1023  if (i > *last_nnz)
1024  *last_nnz = i;
1025  CLOSE_READER(re, &s->gb);
1026  return 0;
1027  }
1028  } else {
1029  run = ((unsigned) code) >> 4;
1030  if (run == 0xF) {
1031  ZERO_RUN;
1032  } else {
1033  val = run;
1034  run = (1 << run);
1035  if (val) {
1036  // Given that GET_VLC reloads internally, we always
1037  // have at least 16 bits in the cache here.
1038  run += SHOW_UBITS(re, &s->gb, val);
1039  LAST_SKIP_BITS(re, &s->gb, val);
1040  }
1041  *EOBRUN = run - 1;
1042  break;
1043  }
1044  }
1045  }
1046 
1047  if (i > *last_nnz)
1048  *last_nnz = i;
1049  }
1050 
1051  for (; i <= last; i++) {
1052  j = s->permutated_scantable[i];
1053  if (block[j])
1054  REFINE_BIT(j)
1055  }
1056  CLOSE_READER(re, &s->gb);
1057 
1058  return 0;
1059 }
1060 #undef REFINE_BIT
1061 #undef ZERO_RUN
1062 
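/* Restart markers: when a restart interval is signalled, RSTn markers
 * (0xFFD0-0xFFD7) appear every restart_interval MCUs and the DC predictors
 * must be reset. handle_rstn() also tries to resynchronize on streams where
 * the marker does not land exactly where expected. */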
1063 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1064 {
1065  int i;
1066  int reset = 0;
1067 
1068  if (s->restart_interval) {
1069  s->restart_count--;
1070  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1071  align_get_bits(&s->gb);
1072  for (i = 0; i < nb_components; i++) /* reset dc */
1073  s->last_dc[i] = (4 << s->bits);
1074  }
1075 
1076  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1077  /* skip RSTn */
1078  if (s->restart_count == 0) {
1079  if( show_bits(&s->gb, i) == (1 << i) - 1
1080  || show_bits(&s->gb, i) == 0xFF) {
1081  int pos = get_bits_count(&s->gb);
1082  align_get_bits(&s->gb);
1083  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1084  skip_bits(&s->gb, 8);
1085  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1086  for (i = 0; i < nb_components; i++) /* reset dc */
1087  s->last_dc[i] = (4 << s->bits);
1088  reset = 1;
1089  } else
1090  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1091  }
1092  }
1093  }
1094  return reset;
1095 }
1096 
1097 /* Handles 1 to 4 components */
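/* Lossless JPEG prediction: PREDICT() picks one of the seven predictors from
 * the spec (left, above, above-left and combinations of them) according to
 * the predictor value signalled in the SOS header; the code falls back to
 * predictor 1 (left neighbour) where the usual neighbours are unavailable,
 * e.g. in the first column or right after a restart resync. */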
1098 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1099 {
1100  int i, mb_x, mb_y;
1101  unsigned width;
1102  uint16_t (*buffer)[4];
1103  int left[4], top[4], topleft[4];
1104  const int linesize = s->linesize[0];
1105  const int mask = ((1 << s->bits) - 1) << point_transform;
1106  int resync_mb_y = 0;
1107  int resync_mb_x = 0;
1108  int vpred[6];
1109  int ret;
1110 
1111  if (!s->bayer && s->nb_components < 3)
1112  return AVERROR_INVALIDDATA;
1113  if (s->bayer && s->nb_components > 2)
1114  return AVERROR_INVALIDDATA;
1115  if (s->nb_components <= 0 || s->nb_components > 4)
1116  return AVERROR_INVALIDDATA;
1117  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1118  return AVERROR_INVALIDDATA;
1119  if (s->bayer) {
1120  if (s->rct || s->pegasus_rct)
1121  return AVERROR_INVALIDDATA;
1122  }
1123 
1124 
1125  s->restart_count = s->restart_interval;
1126 
1127  if (s->restart_interval == 0)
1128  s->restart_interval = INT_MAX;
1129 
1130  if (s->bayer)
1131  width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
1132  else
1133  width = s->mb_width;
1134 
1135  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1136  if (!s->ljpeg_buffer)
1137  return AVERROR(ENOMEM);
1138 
1139  buffer = s->ljpeg_buffer;
1140 
1141  for (i = 0; i < 4; i++)
1142  buffer[0][i] = 1 << (s->bits - 1);
1143 
1144  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1145  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1146 
1147  if (s->interlaced && s->bottom_field)
1148  ptr += linesize >> 1;
1149 
1150  for (i = 0; i < 4; i++)
1151  top[i] = left[i] = topleft[i] = buffer[0][i];
1152 
1153  if ((mb_y * s->width) % s->restart_interval == 0) {
1154  for (i = 0; i < 6; i++)
1155  vpred[i] = 1 << (s->bits-1);
1156  }
1157 
1158  for (mb_x = 0; mb_x < width; mb_x++) {
1159  int modified_predictor = predictor;
1160 
1161  if (get_bits_left(&s->gb) < 1) {
1162  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1163  return AVERROR_INVALIDDATA;
1164  }
1165 
1166  if (s->restart_interval && !s->restart_count){
1167  s->restart_count = s->restart_interval;
1168  resync_mb_x = mb_x;
1169  resync_mb_y = mb_y;
1170  for(i=0; i<4; i++)
1171  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1172  }
1173  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1174  modified_predictor = 1;
1175 
1176  for (i=0;i<nb_components;i++) {
1177  int pred, dc;
1178 
1179  topleft[i] = top[i];
1180  top[i] = buffer[mb_x][i];
1181 
1182  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1183  if (ret < 0)
1184  return ret;
1185 
1186  if (!s->bayer || mb_x) {
1187  pred = left[i];
1188  } else { /* This path runs only for the first line in bayer images */
1189  vpred[i] += dc;
1190  pred = vpred[i] - dc;
1191  }
1192 
1193  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1194 
1195  left[i] = buffer[mb_x][i] =
1196  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1197  }
1198 
1199  if (s->restart_interval && !--s->restart_count) {
1200  align_get_bits(&s->gb);
1201  skip_bits(&s->gb, 16); /* skip RSTn */
1202  }
1203  }
1204  if (s->rct && s->nb_components == 4) {
1205  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1206  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1207  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1208  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1209  ptr[4*mb_x + 0] = buffer[mb_x][3];
1210  }
1211  } else if (s->nb_components == 4) {
1212  for(i=0; i<nb_components; i++) {
1213  int c= s->comp_index[i];
1214  if (s->bits <= 8) {
1215  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1216  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1217  }
1218  } else if(s->bits == 9) {
1219  return AVERROR_PATCHWELCOME;
1220  } else {
1221  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1222  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1223  }
1224  }
1225  }
1226  } else if (s->rct) {
1227  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1228  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1229  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1230  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1231  }
1232  } else if (s->pegasus_rct) {
1233  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1234  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1235  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1236  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1237  }
1238  } else if (s->bayer) {
1239  if (s->bits <= 8)
1240  return AVERROR_PATCHWELCOME;
1241  if (nb_components == 1) {
1242  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1243  for (mb_x = 0; mb_x < width; mb_x++)
1244  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1245  } else if (nb_components == 2) {
1246  for (mb_x = 0; mb_x < width; mb_x++) {
1247  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1248  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1249  }
1250  }
1251  } else {
1252  for(i=0; i<nb_components; i++) {
1253  int c= s->comp_index[i];
1254  if (s->bits <= 8) {
1255  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1256  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1257  }
1258  } else if(s->bits == 9) {
1259  return AVERROR_PATCHWELCOME;
1260  } else {
1261  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1262  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1263  }
1264  }
1265  }
1266  }
1267  }
1268  return 0;
1269 }
1270 
1271 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1272  int point_transform, int nb_components)
1273 {
1274  int i, mb_x, mb_y, mask;
1275  int bits= (s->bits+7)&~7;
1276  int resync_mb_y = 0;
1277  int resync_mb_x = 0;
1278  int ret;
1279 
1280  point_transform += bits - s->bits;
1281  mask = ((1 << s->bits) - 1) << point_transform;
1282 
1283  av_assert0(nb_components>=1 && nb_components<=4);
1284 
1285  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1286  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1287  if (get_bits_left(&s->gb) < 1) {
1288  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1289  return AVERROR_INVALIDDATA;
1290  }
1291  if (s->restart_interval && !s->restart_count){
1292  s->restart_count = s->restart_interval;
1293  resync_mb_x = mb_x;
1294  resync_mb_y = mb_y;
1295  }
1296 
1297  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1298  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1299  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1300  for (i = 0; i < nb_components; i++) {
1301  uint8_t *ptr;
1302  uint16_t *ptr16;
1303  int n, h, v, x, y, c, j, linesize;
1304  n = s->nb_blocks[i];
1305  c = s->comp_index[i];
1306  h = s->h_scount[i];
1307  v = s->v_scount[i];
1308  x = 0;
1309  y = 0;
1310  linesize= s->linesize[c];
1311 
1312  if(bits>8) linesize /= 2;
1313 
1314  for(j=0; j<n; j++) {
1315  int pred, dc;
1316 
1317  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1318  if (ret < 0)
1319  return ret;
1320 
1321  if ( h * mb_x + x >= s->width
1322  || v * mb_y + y >= s->height) {
1323  // Nothing to do
1324  } else if (bits<=8) {
1325  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1326  if(y==0 && toprow){
1327  if(x==0 && leftcol){
1328  pred= 1 << (bits - 1);
1329  }else{
1330  pred= ptr[-1];
1331  }
1332  }else{
1333  if(x==0 && leftcol){
1334  pred= ptr[-linesize];
1335  }else{
1336  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1337  }
1338  }
1339 
1340  if (s->interlaced && s->bottom_field)
1341  ptr += linesize >> 1;
1342  pred &= mask;
1343  *ptr= pred + ((unsigned)dc << point_transform);
1344  }else{
1345  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1346  if(y==0 && toprow){
1347  if(x==0 && leftcol){
1348  pred= 1 << (bits - 1);
1349  }else{
1350  pred= ptr16[-1];
1351  }
1352  }else{
1353  if(x==0 && leftcol){
1354  pred= ptr16[-linesize];
1355  }else{
1356  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1357  }
1358  }
1359 
1360  if (s->interlaced && s->bottom_field)
1361  ptr16 += linesize >> 1;
1362  pred &= mask;
1363  *ptr16= pred + ((unsigned)dc << point_transform);
1364  }
1365  if (++x == h) {
1366  x = 0;
1367  y++;
1368  }
1369  }
1370  }
1371  } else {
1372  for (i = 0; i < nb_components; i++) {
1373  uint8_t *ptr;
1374  uint16_t *ptr16;
1375  int n, h, v, x, y, c, j, linesize, dc;
1376  n = s->nb_blocks[i];
1377  c = s->comp_index[i];
1378  h = s->h_scount[i];
1379  v = s->v_scount[i];
1380  x = 0;
1381  y = 0;
1382  linesize = s->linesize[c];
1383 
1384  if(bits>8) linesize /= 2;
1385 
1386  for (j = 0; j < n; j++) {
1387  int pred;
1388 
1389  ret = mjpeg_decode_dc(s, s->dc_index[i], &dc);
1390  if (ret < 0)
1391  return ret;
1392 
1393  if ( h * mb_x + x >= s->width
1394  || v * mb_y + y >= s->height) {
1395  // Nothing to do
1396  } else if (bits<=8) {
1397  ptr = s->picture_ptr->data[c] +
1398  (linesize * (v * mb_y + y)) +
1399  (h * mb_x + x); //FIXME optimize this crap
1400  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1401 
1402  pred &= mask;
1403  *ptr = pred + ((unsigned)dc << point_transform);
1404  }else{
1405  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1406  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1407 
1408  pred &= mask;
1409  *ptr16= pred + ((unsigned)dc << point_transform);
1410  }
1411 
1412  if (++x == h) {
1413  x = 0;
1414  y++;
1415  }
1416  }
1417  }
1418  }
1419  if (s->restart_interval && !--s->restart_count) {
1420  align_get_bits(&s->gb);
1421  skip_bits(&s->gb, 16); /* skip RSTn */
1422  }
1423  }
1424  }
1425  return 0;
1426 }
1427 
1428 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1429  uint8_t *dst, const uint8_t *src,
1430  int linesize, int lowres)
1431 {
1432  switch (lowres) {
1433  case 0: s->copy_block(dst, src, linesize, 8);
1434  break;
1435  case 1: copy_block4(dst, src, linesize, linesize, 4);
1436  break;
1437  case 2: copy_block2(dst, src, linesize, linesize, 2);
1438  break;
1439  case 3: *dst = *src;
1440  break;
1441  }
1442 }
1443 
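/* For bit depths that are not a multiple of 8, decoded samples are shifted
 * left so they end up MSB-aligned in the 8- or 16-bit output format
 * (e.g. 12-bit samples are shifted by 4 into 16-bit pixels). */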
1444 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1445 {
1446  int block_x, block_y;
1447  int size = 8 >> s->avctx->lowres;
1448  if (s->bits > 8) {
1449  for (block_y=0; block_y<size; block_y++)
1450  for (block_x=0; block_x<size; block_x++)
1451  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1452  } else {
1453  for (block_y=0; block_y<size; block_y++)
1454  for (block_x=0; block_x<size; block_x++)
1455  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1456  }
1457 }
1458 
1459 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1460  int Al, const uint8_t *mb_bitmask,
1461  int mb_bitmask_size,
1462  const AVFrame *reference)
1463 {
1464  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1465  uint8_t *data[MAX_COMPONENTS];
1466  const uint8_t *reference_data[MAX_COMPONENTS];
1467  int linesize[MAX_COMPONENTS];
1468  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1469  int bytes_per_pixel = 1 + (s->bits > 8);
1470 
1471  if (mb_bitmask) {
1472  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1473  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1474  return AVERROR_INVALIDDATA;
1475  }
1476  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1477  }
1478 
1479  s->restart_count = 0;
1480 
1481  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1482  &chroma_v_shift);
1483  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1484  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1485 
1486  for (i = 0; i < nb_components; i++) {
1487  int c = s->comp_index[i];
1488  data[c] = s->picture_ptr->data[c];
1489  reference_data[c] = reference ? reference->data[c] : NULL;
1490  linesize[c] = s->linesize[c];
1491  s->coefs_finished[c] |= 1;
1492  }
1493 
1494  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1495  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1496  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1497 
1498  if (s->restart_interval && !s->restart_count)
1499  s->restart_count = s->restart_interval;
1500 
1501  if (get_bits_left(&s->gb) < 0) {
1502  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1503  -get_bits_left(&s->gb));
1504  return AVERROR_INVALIDDATA;
1505  }
1506  for (i = 0; i < nb_components; i++) {
1507  uint8_t *ptr;
1508  int n, h, v, x, y, c, j;
1509  int block_offset;
1510  n = s->nb_blocks[i];
1511  c = s->comp_index[i];
1512  h = s->h_scount[i];
1513  v = s->v_scount[i];
1514  x = 0;
1515  y = 0;
1516  for (j = 0; j < n; j++) {
1517  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1518  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1519 
1520  if (s->interlaced && s->bottom_field)
1521  block_offset += linesize[c] >> 1;
1522  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1523  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1524  ptr = data[c] + block_offset;
1525  } else
1526  ptr = NULL;
1527  if (!s->progressive) {
1528  if (copy_mb) {
1529  if (ptr)
1530  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1531  linesize[c], s->avctx->lowres);
1532 
1533  } else {
1534  s->bdsp.clear_block(s->block);
1535  if (decode_block(s, s->block, i,
1536  s->dc_index[i], s->ac_index[i],
1537  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1538  av_log(s->avctx, AV_LOG_ERROR,
1539  "error y=%d x=%d\n", mb_y, mb_x);
1540  return AVERROR_INVALIDDATA;
1541  }
1542  if (ptr && linesize[c]) {
1543  s->idsp.idct_put(ptr, linesize[c], s->block);
1544  if (s->bits & 7)
1545  shift_output(s, ptr, linesize[c]);
1546  }
1547  }
1548  } else {
1549  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1550  (h * mb_x + x);
1551  int16_t *block = s->blocks[c][block_idx];
1552  if (Ah)
1553  block[0] += get_bits1(&s->gb) *
1554  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1555  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1556  s->quant_matrixes[s->quant_sindex[i]],
1557  Al) < 0) {
1558  av_log(s->avctx, AV_LOG_ERROR,
1559  "error y=%d x=%d\n", mb_y, mb_x);
1560  return AVERROR_INVALIDDATA;
1561  }
1562  }
1563  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1564  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1565  mb_x, mb_y, x, y, c, s->bottom_field,
1566  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1567  if (++x == h) {
1568  x = 0;
1569  y++;
1570  }
1571  }
1572  }
1573 
1574  handle_rstn(s, nb_components);
1575  }
1576  }
1577  return 0;
1578 }
1579 
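/* Progressive AC scans: ss/se give the band of zig-zag coefficients covered
 * by this scan (spectral selection) and Ah/Al the successive-approximation
 * bit positions. Ah == 0 selects a first pass (decode_block_progressive),
 * nonzero Ah a refinement pass (decode_block_refinement). */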
1580 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1581  int se, int Ah, int Al)
1582 {
1583  int mb_x, mb_y;
1584  int EOBRUN = 0;
1585  int c = s->comp_index[0];
1586  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1587 
1588  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1589  if (se < ss || se > 63) {
1590  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1591  return AVERROR_INVALIDDATA;
1592  }
1593 
1594  // s->coefs_finished is a bitmask for coefficients coded
1595  // ss and se are parameters telling start and end coefficients
1596  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1597 
1598  s->restart_count = 0;
1599 
1600  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1601  int block_idx = mb_y * s->block_stride[c];
1602  int16_t (*block)[64] = &s->blocks[c][block_idx];
1603  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1604  if (get_bits_left(&s->gb) <= 0) {
1605  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1606  return AVERROR_INVALIDDATA;
1607  }
1608  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1609  int ret;
1610  if (s->restart_interval && !s->restart_count)
1611  s->restart_count = s->restart_interval;
1612 
1613  if (Ah)
1614  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1615  quant_matrix, ss, se, Al, &EOBRUN);
1616  else
1617  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1618  quant_matrix, ss, se, Al, &EOBRUN);
1619 
1620  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1621  ret = AVERROR_INVALIDDATA;
1622  if (ret < 0) {
1623  av_log(s->avctx, AV_LOG_ERROR,
1624  "error y=%d x=%d\n", mb_y, mb_x);
1625  return AVERROR_INVALIDDATA;
1626  }
1627 
1628  if (handle_rstn(s, 0))
1629  EOBRUN = 0;
1630  }
1631  }
1632  return 0;
1633 }
1634 
1635 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1636 {
1637  int mb_x, mb_y;
1638  int c;
1639  const int bytes_per_pixel = 1 + (s->bits > 8);
1640  const int block_size = s->lossless ? 1 : 8;
1641 
1642  for (c = 0; c < s->nb_components; c++) {
1643  uint8_t *data = s->picture_ptr->data[c];
1644  int linesize = s->linesize[c];
1645  int h = s->h_max / s->h_count[c];
1646  int v = s->v_max / s->v_count[c];
1647  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1648  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1649 
1650  if (~s->coefs_finished[c])
1651  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1652 
1653  if (s->interlaced && s->bottom_field)
1654  data += linesize >> 1;
1655 
1656  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1657  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1658  int block_idx = mb_y * s->block_stride[c];
1659  int16_t (*block)[64] = &s->blocks[c][block_idx];
1660  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1661  s->idsp.idct_put(ptr, linesize, *block);
1662  if (s->bits & 7)
1663  shift_output(s, ptr, linesize);
1664  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1665  }
1666  }
1667  }
1668 }
1669 
1670 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1671  int mb_bitmask_size, const AVFrame *reference)
1672 {
1673  int len, nb_components, i, h, v, predictor, point_transform;
1674  int index, id, ret;
1675  const int block_size = s->lossless ? 1 : 8;
1676  int ilv, prev_shift;
1677 
1678  if (!s->got_picture) {
1679  av_log(s->avctx, AV_LOG_WARNING,
1680  "Can not process SOS before SOF, skipping\n");
1681  return AVERROR_INVALIDDATA;
1682  }
1683 
1684  /* XXX: verify len field validity */
1685  len = get_bits(&s->gb, 16);
1686  nb_components = get_bits(&s->gb, 8);
1687  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1688  avpriv_report_missing_feature(s->avctx,
1689  "decode_sos: nb_components (%d)",
1690  nb_components);
1691  return AVERROR_PATCHWELCOME;
1692  }
1693  if (len != 6 + 2 * nb_components) {
1694  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1695  return AVERROR_INVALIDDATA;
1696  }
1697  for (i = 0; i < nb_components; i++) {
1698  id = get_bits(&s->gb, 8);
1699  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1700  /* find component index */
1701  for (index = 0; index < s->nb_components; index++)
1702  if (id == s->component_id[index])
1703  break;
1704  if (index == s->nb_components) {
1705  av_log(s->avctx, AV_LOG_ERROR,
1706  "decode_sos: index(%d) out of components\n", index);
1707  return AVERROR_INVALIDDATA;
1708  }
1709  /* Metasoft MJPEG codec has Cb and Cr swapped */
1710  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1711  && nb_components == 3 && s->nb_components == 3 && i)
1712  index = 3 - i;
1713 
1714  s->quant_sindex[i] = s->quant_index[index];
1715  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1716  s->h_scount[i] = s->h_count[index];
1717  s->v_scount[i] = s->v_count[index];
1718 
1719  s->comp_index[i] = index;
1720 
1721  s->dc_index[i] = get_bits(&s->gb, 4);
1722  s->ac_index[i] = get_bits(&s->gb, 4);
1723 
1724  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1725  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1726  goto out_of_range;
1727  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1728  goto out_of_range;
1729  }
1730 
1731  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1732  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1733  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1734  prev_shift = get_bits(&s->gb, 4); /* Ah */
1735  point_transform = get_bits(&s->gb, 4); /* Al */
1736  }else
1737  prev_shift = point_transform = 0;
1738 
1739  if (nb_components > 1) {
1740  /* interleaved stream */
1741  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1742  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1743  } else if (!s->ls) { /* skip this for JPEG-LS */
1744  h = s->h_max / s->h_scount[0];
1745  v = s->v_max / s->v_scount[0];
1746  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1747  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1748  s->nb_blocks[0] = 1;
1749  s->h_scount[0] = 1;
1750  s->v_scount[0] = 1;
1751  }
1752 
1753  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1754  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1755  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1756  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1757  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1758 
1759 
1760  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1761  for (i = s->mjpb_skiptosod; i > 0; i--)
1762  skip_bits(&s->gb, 8);
1763 
1764 next_field:
1765  for (i = 0; i < nb_components; i++)
1766  s->last_dc[i] = (4 << s->bits);
1767 
1768  if (s->avctx->hwaccel) {
1769  int bytes_to_start = get_bits_count(&s->gb) / 8;
1770  av_assert0(bytes_to_start >= 0 &&
1771  s->raw_scan_buffer_size >= bytes_to_start);
1772 
1773  ret = FF_HW_CALL(s->avctx, decode_slice,
1774  s->raw_scan_buffer + bytes_to_start,
1775  s->raw_scan_buffer_size - bytes_to_start);
1776  if (ret < 0)
1777  return ret;
1778 
1779  } else if (s->lossless) {
1780  av_assert0(s->picture_ptr == s->picture);
1781  if (CONFIG_JPEGLS_DECODER && s->ls) {
1782 // for () {
1783 // reset_ls_coding_parameters(s, 0);
1784 
1785  if ((ret = ff_jpegls_decode_picture(s, predictor,
1786  point_transform, ilv)) < 0)
1787  return ret;
1788  } else {
1789  if (s->rgb || s->bayer) {
1790  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1791  return ret;
1792  } else {
1793  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1794  point_transform,
1795  nb_components)) < 0)
1796  return ret;
1797  }
1798  }
1799  } else {
1800  if (s->progressive && predictor) {
1801  av_assert0(s->picture_ptr == s->picture);
1802  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1803  ilv, prev_shift,
1804  point_transform)) < 0)
1805  return ret;
1806  } else {
1807  if ((ret = mjpeg_decode_scan(s, nb_components,
1808  prev_shift, point_transform,
1809  mb_bitmask, mb_bitmask_size, reference)) < 0)
1810  return ret;
1811  }
1812  }
1813 
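/* AVRn-style interlaced files carry both fields in one picture, separated by
 * an RST1 (0xFFD1) marker; when one is found, toggle the target field and
 * decode the second field into the same frame. */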
1814  if (s->interlaced &&
1815  get_bits_left(&s->gb) > 32 &&
1816  show_bits(&s->gb, 8) == 0xFF) {
1817  GetBitContext bak = s->gb;
1818  align_get_bits(&bak);
1819  if (show_bits(&bak, 16) == 0xFFD1) {
1820  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1821  s->gb = bak;
1822  skip_bits(&s->gb, 16);
1823  s->bottom_field ^= 1;
1824 
1825  goto next_field;
1826  }
1827  }
1828 
1829  return 0;
1830  out_of_range:
1831  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1832  return AVERROR_INVALIDDATA;
1833 }
1834 
1835 static int mjpeg_decode_dri(MJpegDecodeContext *s)
1836 {
1837  if (get_bits(&s->gb, 16) != 4)
1838  return AVERROR_INVALIDDATA;
1839  s->restart_interval = get_bits(&s->gb, 16);
1840  s->restart_count = 0;
1841  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1842  s->restart_interval);
1843 
1844  return 0;
1845 }
1846 
1847 static int mjpeg_decode_app(MJpegDecodeContext *s)
1848 {
1849  int len, id, i;
1850 
1851  len = get_bits(&s->gb, 16);
1852  if (len < 2)
1853  return AVERROR_INVALIDDATA;
1854  len -= 2;
1855 
1856  if (len < 4) {
1857  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1858  return AVERROR_INVALIDDATA;
1859  av_log(s->avctx, AV_LOG_VERBOSE, "skipping APPx stub (len=%" PRId32 ")\n", len);
1860  goto out;
1861  }
1862 
1863  if (8 * len > get_bits_left(&s->gb))
1864  return AVERROR_INVALIDDATA;
1865 
1866  id = get_bits_long(&s->gb, 32);
1867  len -= 4;
1868 
1869  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1870  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1871  av_fourcc2str(av_bswap32(id)), id, len);
1872 
1873  /* Buggy AVID: it puts EOI only at every 10th frame. */
1874  /* This fourcc is also used by non-AVID files; it holds some
1875  information, but it is always present in AVID-created files. */
1876  if (id == AV_RB32("AVI1")) {
1877  /* structure:
1878  4bytes AVI1
1879  1bytes polarity
1880  1bytes always zero
1881  4bytes field_size
1882  4bytes field_size_less_padding
1883  */
1884  s->buggy_avid = 1;
1885  i = get_bits(&s->gb, 8); len--;
1886  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1887  goto out;
1888  }
1889 
1890  if (id == AV_RB32("JFIF")) {
1891  int t_w, t_h, v1, v2;
1892  if (len < 8)
1893  goto out;
1894  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1895  v1 = get_bits(&s->gb, 8);
1896  v2 = get_bits(&s->gb, 8);
1897  skip_bits(&s->gb, 8);
1898 
1899  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1900  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1901  if ( s->avctx->sample_aspect_ratio.num <= 0
1902  || s->avctx->sample_aspect_ratio.den <= 0) {
1903  s->avctx->sample_aspect_ratio.num = 0;
1904  s->avctx->sample_aspect_ratio.den = 1;
1905  }
1906 
1907  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1908  av_log(s->avctx, AV_LOG_INFO,
1909  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1910  v1, v2,
1911  s->avctx->sample_aspect_ratio.num,
1912  s->avctx->sample_aspect_ratio.den);
1913 
1914  len -= 8;
1915  if (len >= 2) {
1916  t_w = get_bits(&s->gb, 8);
1917  t_h = get_bits(&s->gb, 8);
1918  if (t_w && t_h) {
1919  /* skip thumbnail */
1920  if (len -10 - (t_w * t_h * 3) > 0)
1921  len -= t_w * t_h * 3;
1922  }
1923  len -= 2;
1924  }
1925  goto out;
1926  }
1927 
1928  if ( id == AV_RB32("Adob")
1929  && len >= 8
1930  && show_bits(&s->gb, 8) == 'e'
1931  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1932  skip_bits(&s->gb, 8); /* 'e' */
1933  skip_bits(&s->gb, 16); /* version */
1934  skip_bits(&s->gb, 16); /* flags0 */
1935  skip_bits(&s->gb, 16); /* flags1 */
1936  s->adobe_transform = get_bits(&s->gb, 8);
1937  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1938  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1939  len -= 8;
1940  goto out;
1941  }
1942 
1943  if (id == AV_RB32("LJIF")) {
1944  int rgb = s->rgb;
1945  int pegasus_rct = s->pegasus_rct;
1946  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1947  av_log(s->avctx, AV_LOG_INFO,
1948  "Pegasus lossless jpeg header found\n");
1949  skip_bits(&s->gb, 16); /* version ? */
1950  skip_bits(&s->gb, 16); /* unknown always 0? */
1951  skip_bits(&s->gb, 16); /* unknown always 0? */
1952  skip_bits(&s->gb, 16); /* unknown always 0? */
1953  switch (i=get_bits(&s->gb, 8)) {
1954  case 1:
1955  rgb = 1;
1956  pegasus_rct = 0;
1957  break;
1958  case 2:
1959  rgb = 1;
1960  pegasus_rct = 1;
1961  break;
1962  default:
1963  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1964  }
1965 
1966  len -= 9;
1967  if (s->bayer)
1968  goto out;
1969  if (s->got_picture)
1970  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1971  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1972  goto out;
1973  }
1974 
1975  s->rgb = rgb;
1976  s->pegasus_rct = pegasus_rct;
1977 
1978  goto out;
1979  }
1980  if (id == AV_RL32("colr") && len > 0) {
1981  s->colr = get_bits(&s->gb, 8);
1982  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1983  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1984  len --;
1985  goto out;
1986  }
1987  if (id == AV_RL32("xfrm") && len > 0) {
1988  s->xfrm = get_bits(&s->gb, 8);
1989  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1990  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1991  len --;
1992  goto out;
1993  }
1994 
1995  /* JPS extension by VRex */
1996  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1997  int flags, layout, type;
1998  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1999  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
2000 
2001  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
2002  skip_bits(&s->gb, 16); len -= 2; /* block length */
2003  skip_bits(&s->gb, 8); /* reserved */
2004  flags = get_bits(&s->gb, 8);
2005  layout = get_bits(&s->gb, 8);
2006  type = get_bits(&s->gb, 8);
2007  len -= 4;
2008 
2009  av_freep(&s->stereo3d);
2010  s->stereo3d = av_stereo3d_alloc();
2011  if (!s->stereo3d) {
2012  goto out;
2013  }
2014  if (type == 0) {
2015  s->stereo3d->type = AV_STEREO3D_2D;
2016  } else if (type == 1) {
2017  switch (layout) {
2018  case 0x01:
2019  s->stereo3d->type = AV_STEREO3D_LINES;
2020  break;
2021  case 0x02:
2022  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
2023  break;
2024  case 0x03:
2025  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
2026  break;
2027  }
2028  if (!(flags & 0x04)) {
2029  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
2030  }
2031  }
2032  goto out;
2033  }
2034 
2035  /* EXIF metadata */
2036  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2037  int ret;
2038  const uint8_t *aligned;
2039 
2040  skip_bits(&s->gb, 16); // skip padding
2041  len -= 2;
2042 
2043  // init byte wise reading
2044  aligned = align_get_bits(&s->gb);
2045 
2046  ret = av_exif_parse_buffer(s->avctx, aligned, len, &s->exif_metadata, AV_EXIF_TIFF_HEADER);
2047  if (ret < 0) {
2048  av_log(s->avctx, AV_LOG_WARNING, "unable to parse EXIF buffer\n");
2049  goto out;
2050  }
2051 
2052  skip_bits(&s->gb, ret << 3);
2053  len -= ret;
2054 
2055  goto out;
2056  }
2057 
2058  /* Apple MJPEG-A */
2059  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2060  id = get_bits_long(&s->gb, 32);
2061  len -= 4;
2062  /* Apple MJPEG-A */
2063  if (id == AV_RB32("mjpg")) {
2064  /* structure:
2065  4bytes field size
2066  4bytes pad field size
2067  4bytes next off
2068  4bytes quant off
2069  4bytes huff off
2070  4bytes image off
2071  4bytes scan off
2072  4bytes data off
2073  */
2074  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2075  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2076  }
2077  }
2078 
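/* ICC profiles larger than one marker segment are split across several APP2
 * "ICC_PROFILE" markers; each part carries its sequence number and the total
 * marker count. The parts are collected here and reassembled into frame side
 * data once the whole profile has been read. */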
2079  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2080  int id2;
2081  unsigned seqno;
2082  unsigned nummarkers;
2083 
2084  id = get_bits_long(&s->gb, 32);
2085  id2 = get_bits(&s->gb, 24);
2086  len -= 7;
2087  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2088  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2089  goto out;
2090  }
2091 
2092  skip_bits(&s->gb, 8);
2093  seqno = get_bits(&s->gb, 8);
2094  len -= 2;
2095  if (seqno == 0) {
2096  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2097  goto out;
2098  }
2099 
2100  nummarkers = get_bits(&s->gb, 8);
2101  len -= 1;
2102  if (nummarkers == 0) {
2103  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2104  goto out;
2105  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2106  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2107  goto out;
2108  } else if (seqno > nummarkers) {
2109  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2110  goto out;
2111  }
2112 
2113  /* Allocate if this is the first APP2 we've seen. */
2114  if (s->iccnum == 0) {
2115  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2116  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2117  return AVERROR(ENOMEM);
2118  }
2119  s->iccnum = nummarkers;
2120  }
2121 
2122  if (s->iccentries[seqno - 1].data) {
2123  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2124  goto out;
2125  }
2126 
2127  s->iccentries[seqno - 1].length = len;
2128  s->iccentries[seqno - 1].data = av_malloc(len);
2129  if (!s->iccentries[seqno - 1].data) {
2130  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2131  return AVERROR(ENOMEM);
2132  }
2133 
2134  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2135  skip_bits(&s->gb, len << 3);
2136  len = 0;
2137  s->iccread++;
2138 
2139  if (s->iccread > s->iccnum)
2140  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2141  }
2142 
2143 out:
2144  /* slow but needed for extreme adobe jpegs */
2145  if (len < 0)
2146  av_log(s->avctx, AV_LOG_ERROR,
2147  "mjpeg: error, decode_app parser read over the end\n");
2148  while (len-- > 0)
2149  skip_bits(&s->gb, 8);
2150 
2151  return 0;
2152 }
2153 
2154 static int mjpeg_decode_com(MJpegDecodeContext *s)
2155 {
2156  int len = get_bits(&s->gb, 16);
2157  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2158  int i;
2159  char *cbuf = av_malloc(len - 1);
2160  if (!cbuf)
2161  return AVERROR(ENOMEM);
2162 
2163  for (i = 0; i < len - 2; i++)
2164  cbuf[i] = get_bits(&s->gb, 8);
2165  if (i > 0 && cbuf[i - 1] == '\n')
2166  cbuf[i - 1] = 0;
2167  else
2168  cbuf[i] = 0;
2169 
2170  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2171  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2172 
2173  /* buggy AVID: it puts EOI only at every 10th frame */
2174  if (!strncmp(cbuf, "AVID", 4)) {
2175  parse_avid(s, cbuf, len);
2176  } else if (!strcmp(cbuf, "CS=ITU601"))
2177  s->cs_itu601 = 1;
2178  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2179  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2180  s->flipped = 1;
2181  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2182  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2183  s->multiscope = 2;
2184  }
2185 
2186  av_free(cbuf);
2187  }
2188 
2189  return 0;
2190 }
2191 
2192 /* return the 8 bit start code value and update the search
2193  state. Return -1 if no start code found */
2194 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2195 {
2196  const uint8_t *buf_ptr;
2197  int val;
2198  int skipped = 0;
2199 
2200  buf_ptr = *pbuf_ptr;
2201  while ((buf_ptr = memchr(buf_ptr, 0xff, buf_end - buf_ptr))) {
2202  buf_ptr++;
2203  while (buf_ptr < buf_end) {
2204  val = *buf_ptr++;
2205  if (val != 0xff) {
2206  if ((val >= SOF0) && (val <= COM))
2207  goto found;
2208  break;
2209  }
2210  }
2211  skipped++;
2212  }
2213  buf_ptr = buf_end;
2214  val = -1;
2215 found:
2216  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2217  *pbuf_ptr = buf_ptr;
2218  return val;
2219 }
2220 
2221 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2222  const uint8_t **buf_ptr, const uint8_t *buf_end,
2223  const uint8_t **unescaped_buf_ptr,
2224  int *unescaped_buf_size)
2225 {
2226  int start_code;
2227  start_code = find_marker(buf_ptr, buf_end);
2228 
2229  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2230  if (!s->buffer)
2231  return AVERROR(ENOMEM);
2232 
2233  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2234  if (start_code == SOS && !s->ls) {
2235  const uint8_t *src = *buf_ptr;
2236  const uint8_t *ptr = src;
2237  uint8_t *dst = s->buffer;
2238 
2239  #define copy_data_segment(skip) do { \
2240  ptrdiff_t length = (ptr - src) - (skip); \
2241  if (length > 0) { \
2242  memcpy(dst, src, length); \
2243  dst += length; \
2244  src = ptr; \
2245  } \
2246  } while (0)
2247 
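/* THP stores the entropy-coded data without marker escaping, so it is copied
 * verbatim. Otherwise the 0x00 stuffing byte that follows a literal 0xFF in
 * the entropy-coded data is dropped, runs of extra 0xFF fill bytes are
 * collapsed, RSTn markers are kept, and any other marker ends the scan. */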
2248  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2249  ptr = buf_end;
2250  copy_data_segment(0);
2251  } else {
2252  while (ptr < buf_end) {
2253  uint8_t x = *(ptr++);
2254 
2255  if (x == 0xff) {
2256  ptrdiff_t skip = 0;
2257  while (ptr < buf_end && x == 0xff) {
2258  x = *(ptr++);
2259  skip++;
2260  }
2261 
2262  /* 0xFF, 0xFF, ... */
2263  if (skip > 1) {
2264  copy_data_segment(skip);
2265 
2266  /* decrement src as it is equal to ptr after the
2267  * copy_data_segment macro and we might want to
2268  * copy the current value of x later on */
2269  src--;
2270  }
2271 
2272  if (x < RST0 || x > RST7) {
2273  copy_data_segment(1);
2274  if (x)
2275  break;
2276  }
2277  }
2278  }
2279  if (src < ptr)
2280  copy_data_segment(0);
2281  }
2282  #undef copy_data_segment
2283 
2284  *unescaped_buf_ptr = s->buffer;
2285  *unescaped_buf_size = dst - s->buffer;
2286  memset(s->buffer + *unescaped_buf_size, 0,
2287  s->buffer_size - *unescaped_buf_size);
2288 
2289  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
2290  (buf_end - *buf_ptr) - (dst - s->buffer));
2291  } else if (start_code == SOS && s->ls) {
2292  const uint8_t *src = *buf_ptr;
2293  uint8_t *dst = s->buffer;
2294  int bit_count = 0;
2295  int t = 0, b = 0;
2296  PutBitContext pb;
2297 
2298  /* find marker */
2299  while (src + t < buf_end) {
2300  uint8_t x = src[t++];
2301  if (x == 0xff) {
2302  while ((src + t < buf_end) && x == 0xff)
2303  x = src[t++];
2304  if (x & 0x80) {
2305  t -= FFMIN(2, t);
2306  break;
2307  }
2308  }
2309  }
2310  bit_count = t * 8;
2311  init_put_bits(&pb, dst, t);
2312 
2313  /* unescape bitstream */
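/* JPEG-LS escapes markers with bit stuffing rather than byte stuffing: after
 * a 0xFF byte only 7 data bits follow (the stuffed 0 bit is discarded), hence
 * one bit is subtracted from the running bit count for each escaped pair. */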
2314  while (b < t) {
2315  uint8_t x = src[b++];
2316  put_bits(&pb, 8, x);
2317  if (x == 0xFF && b < t) {
2318  x = src[b++];
2319  if (x & 0x80) {
2320  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2321  x &= 0x7f;
2322  }
2323  put_bits(&pb, 7, x);
2324  bit_count--;
2325  }
2326  }
2327  flush_put_bits(&pb);
2328 
2329  *unescaped_buf_ptr = dst;
2330  *unescaped_buf_size = (bit_count + 7) >> 3;
2331  memset(s->buffer + *unescaped_buf_size, 0,
2332  s->buffer_size - *unescaped_buf_size);
2333  } else {
2334  *unescaped_buf_ptr = *buf_ptr;
2335  *unescaped_buf_size = buf_end - *buf_ptr;
2336  }
2337 
2338  return start_code;
2339 }
2340 
2341 static void reset_icc_profile(MJpegDecodeContext *s)
2342 {
2343  int i;
2344 
2345  if (s->iccentries) {
2346  for (i = 0; i < s->iccnum; i++)
2347  av_freep(&s->iccentries[i].data);
2348  av_freep(&s->iccentries);
2349  }
2350 
2351  s->iccread = 0;
2352  s->iccnum = 0;
2353 }
2354 
2355 int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame,
2356  int *got_frame, const AVPacket *avpkt,
2357  const uint8_t *buf, const int buf_size)
2358 {
2359  MJpegDecodeContext *s = avctx->priv_data;
2360  const uint8_t *buf_end, *buf_ptr;
2361  const uint8_t *unescaped_buf_ptr;
2362  int hshift, vshift;
2363  int unescaped_buf_size;
2364  int start_code;
2365  int index;
2366  int ret = 0;
2367  int is16bit;
2368 
2369  s->force_pal8 = 0;
2370 
2371  s->buf_size = buf_size;
2372 
2373  av_exif_free(&s->exif_metadata);
2374  av_freep(&s->stereo3d);
2375  s->adobe_transform = -1;
2376 
2377  if (s->iccnum != 0)
2378  reset_icc_profile(s);
2379 
2380 redo_for_pal8:
2381  buf_ptr = buf;
2382  buf_end = buf + buf_size;
2383  while (buf_ptr < buf_end) {
2384  /* find start of next marker */
2385  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2386  &unescaped_buf_ptr,
2387  &unescaped_buf_size);
2388  /* EOF */
2389  if (start_code < 0) {
2390  break;
2391  } else if (unescaped_buf_size > INT_MAX / 8) {
2392  av_log(avctx, AV_LOG_ERROR,
2393  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2394  start_code, unescaped_buf_size, buf_size);
2395  return AVERROR_INVALIDDATA;
2396  }
2397  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
2398  start_code, buf_end - buf_ptr);
2399 
2400  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2401 
2402  if (ret < 0) {
2403  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2404  goto fail;
2405  }
2406 
2407  s->start_code = start_code;
2408  if (avctx->debug & FF_DEBUG_STARTCODE)
2409  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2410 
2411  /* process markers */
2412  if (start_code >= RST0 && start_code <= RST7) {
2413  av_log(avctx, AV_LOG_DEBUG,
2414  "restart marker: %d\n", start_code & 0x0f);
2415  /* APP fields */
2416  } else if (start_code >= APP0 && start_code <= APP15) {
2417  if ((ret = mjpeg_decode_app(s)) < 0)
2418  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2419  av_err2str(ret));
2420  /* Comment */
2421  } else if (start_code == COM) {
2422  ret = mjpeg_decode_com(s);
2423  if (ret < 0)
2424  return ret;
2425  } else if (start_code == DQT) {
2426  ret = ff_mjpeg_decode_dqt(s);
2427  if (ret < 0)
2428  return ret;
2429  }
2430 
2431  ret = -1;
2432 
2433  if (!CONFIG_JPEGLS_DECODER &&
2434  (start_code == SOF48 || start_code == LSE)) {
2435  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2436  return AVERROR(ENOSYS);
2437  }
2438 
2439  if (avctx->skip_frame == AVDISCARD_ALL) {
2440  switch(start_code) {
2441  case SOF0:
2442  case SOF1:
2443  case SOF2:
2444  case SOF3:
2445  case SOF48:
2446  break;
2447  default:
2448  goto skip;
2449  }
2450  }
2451 
2452  switch (start_code) {
2453  case SOI:
2454  s->restart_interval = 0;
2455  s->restart_count = 0;
2456  s->raw_image_buffer = buf_ptr;
2457  s->raw_image_buffer_size = buf_end - buf_ptr;
2458  /* nothing to do on SOI */
2459  break;
2460  case DHT:
2461  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2462  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2463  goto fail;
2464  }
2465  break;
2466  case SOF0:
2467  case SOF1:
2468  if (start_code == SOF0)
2469  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2470  else
2471  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2472  s->lossless = 0;
2473  s->ls = 0;
2474  s->progressive = 0;
2475  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2476  goto fail;
2477  break;
2478  case SOF2:
2479  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2480  s->lossless = 0;
2481  s->ls = 0;
2482  s->progressive = 1;
2483  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2484  goto fail;
2485  break;
2486  case SOF3:
2487  s->avctx->profile = AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2488 #if FF_API_CODEC_PROPS
2489 FF_DISABLE_DEPRECATION_WARNINGS
2490  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2491 FF_ENABLE_DEPRECATION_WARNINGS
2492 #endif
2493  s->lossless = 1;
2494  s->ls = 0;
2495  s->progressive = 0;
2496  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2497  goto fail;
2498  break;
2499  case SOF48:
2500  s->avctx->profile = AV_PROFILE_MJPEG_JPEG_LS;
2501 #if FF_API_CODEC_PROPS
2502 FF_DISABLE_DEPRECATION_WARNINGS
2503  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2504 FF_ENABLE_DEPRECATION_WARNINGS
2505 #endif
2506  s->lossless = 1;
2507  s->ls = 1;
2508  s->progressive = 0;
2509  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2510  goto fail;
2511  break;
2512  case LSE:
2513  if (!CONFIG_JPEGLS_DECODER ||
2514  (ret = ff_jpegls_decode_lse(s)) < 0)
2515  goto fail;
2516  if (ret == 1)
2517  goto redo_for_pal8;
2518  break;
2519  case EOI:
2520 eoi_parser:
2521  if (!avctx->hwaccel &&
2522  s->progressive && s->cur_scan && s->got_picture)
2523  mjpeg_idct_scan_progressive_ac(s);
2524  s->cur_scan = 0;
2525  if (!s->got_picture) {
2526  av_log(avctx, AV_LOG_WARNING,
2527  "Found EOI before any SOF, ignoring\n");
2528  break;
2529  }
2530  if (s->interlaced) {
2531  s->bottom_field ^= 1;
2532  /* if not bottom field, do not output image yet */
2533  if (s->bottom_field == !s->interlace_polarity)
2534  break;
2535  }
2536  if (avctx->hwaccel) {
2537  ret = FF_HW_SIMPLE_CALL(avctx, end_frame);
2538  if (ret < 0)
2539  return ret;
2540 
2541  av_freep(&s->hwaccel_picture_private);
2542  }
2543  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2544  return ret;
2545  if (s->lossless)
2546  frame->flags |= AV_FRAME_FLAG_LOSSLESS;
2547  *got_frame = 1;
2548  s->got_picture = 0;
2549 
2550  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2551  int qp = FFMAX3(s->qscale[0],
2552  s->qscale[1],
2553  s->qscale[2]);
2554 
2555  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2556  }
2557 
2558  goto the_end;
2559  case SOS:
2560  s->raw_scan_buffer = buf_ptr;
2561  s->raw_scan_buffer_size = buf_end - buf_ptr;
2562 
2563  s->cur_scan++;
2564 
2565  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2566  (avctx->err_recognition & AV_EF_EXPLODE))
2567  goto fail;
2568  break;
2569  case DRI:
2570  if ((ret = mjpeg_decode_dri(s)) < 0)
2571  return ret;
2572  break;
2573  case SOF5:
2574  case SOF6:
2575  case SOF7:
2576  case SOF9:
2577  case SOF10:
2578  case SOF11:
2579  case SOF13:
2580  case SOF14:
2581  case SOF15:
2582  case JPG:
2583  av_log(avctx, AV_LOG_ERROR,
2584  "mjpeg: unsupported coding type (%x)\n", start_code);
2585  break;
2586  }
2587 
2588  if (avctx->skip_frame == AVDISCARD_ALL) {
2589  switch(start_code) {
2590  case SOF0:
2591  case SOF1:
2592  case SOF2:
2593  case SOF3:
2594  case SOF48:
2595  s->got_picture = 0;
2596  goto the_end_no_picture;
2597  }
2598  }
2599 
2600 skip:
2601  /* eof process start code */
2602  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2603  av_log(avctx, AV_LOG_DEBUG,
2604  "marker parser used %d bytes (%d bits)\n",
2605  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2606  }
2607  if (s->got_picture && s->cur_scan) {
2608  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2609  goto eoi_parser;
2610  }
2611  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2612  return AVERROR_INVALIDDATA;
2613 fail:
2614  s->got_picture = 0;
2615  return ret;
2616 the_end:
2617 
2618  is16bit = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].step > 1;
2619 
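/* Planes flagged in upscale_h were decoded at a reduced horizontal resolution
 * and are stretched back to full width in place here (factors 2, 3 and 4),
 * working from the end of each line towards the start. */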
2620  if (AV_RB32(s->upscale_h)) {
2621  int p;
2622  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2623  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2624  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2625  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2626  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2627  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2628  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2629  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2630  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2631  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2632  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2633  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2634  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2635  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2636  );
2637  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2638  if (ret)
2639  return ret;
2640 
2641  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2642  for (p = 0; p<s->nb_components; p++) {
2643  uint8_t *line = s->picture_ptr->data[p];
2644  int w = s->width;
2645  int h = s->height;
2646  if (!s->upscale_h[p])
2647  continue;
2648  if (p==1 || p==2) {
2649  w = AV_CEIL_RSHIFT(w, hshift);
2650  h = AV_CEIL_RSHIFT(h, vshift);
2651  }
2652  if (s->upscale_v[p] == 1)
2653  h = (h+1)>>1;
2654  av_assert0(w > 0);
2655  for (int i = 0; i < h; i++) {
2656  if (s->upscale_h[p] == 1) {
2657  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2658  else line[w - 1] = line[(w - 1) / 2];
2659  for (index = w - 2; index > 0; index--) {
2660  if (is16bit)
2661  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2662  else
2663  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2664  }
2665  } else if (s->upscale_h[p] == 2) {
2666  if (is16bit) {
2667  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2668  if (w > 1)
2669  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2670  } else {
2671  line[w - 1] = line[(w - 1) / 3];
2672  if (w > 1)
2673  line[w - 2] = line[w - 1];
2674  }
2675  for (index = w - 3; index > 0; index--) {
2676  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2677  }
2678  } else if (s->upscale_h[p] == 4){
2679  if (is16bit) {
2680  uint16_t *line16 = (uint16_t *) line;
2681  line16[w - 1] = line16[(w - 1) >> 2];
2682  if (w > 1)
2683  line16[w - 2] = (line16[(w - 1) >> 2] * 3 + line16[(w - 2) >> 2]) >> 2;
2684  if (w > 2)
2685  line16[w - 3] = (line16[(w - 1) >> 2] + line16[(w - 2) >> 2]) >> 1;
2686  } else {
2687  line[w - 1] = line[(w - 1) >> 2];
2688  if (w > 1)
2689  line[w - 2] = (line[(w - 1) >> 2] * 3 + line[(w - 2) >> 2]) >> 2;
2690  if (w > 2)
2691  line[w - 3] = (line[(w - 1) >> 2] + line[(w - 2) >> 2]) >> 1;
2692  }
2693  for (index = w - 4; index > 0; index--)
2694  line[index] = (line[(index + 3) >> 2] + line[(index + 2) >> 2]
2695  + line[(index + 1) >> 2] + line[index >> 2]) >> 2;
2696  }
2697  line += s->linesize[p];
2698  }
2699  }
2700  }
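/* Likewise for vertical subsampling: planes flagged in upscale_v are stretched
 * back to full height in place, bottom-up, averaging neighbouring source rows
 * where two output rows map to different inputs. */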
2701  if (AV_RB32(s->upscale_v)) {
2702  int p;
2703  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2704  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2705  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2706  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2707  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2708  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2709  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2710  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2711  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2712  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2713  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2714  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2715  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2716  );
2717  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2718  if (ret)
2719  return ret;
2720 
2721  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2722  for (p = 0; p < s->nb_components; p++) {
2723  uint8_t *dst;
2724  int w = s->width;
2725  int h = s->height;
2726  if (!s->upscale_v[p])
2727  continue;
2728  if (p==1 || p==2) {
2729  w = AV_CEIL_RSHIFT(w, hshift);
2730  h = AV_CEIL_RSHIFT(h, vshift);
2731  }
2732  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2733  for (int i = h - 1; i; i--) {
2734  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2735  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2736  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2737  memcpy(dst, src1, w);
2738  } else {
2739  for (index = 0; index < w; index++)
2740  dst[index] = (src1[index] + src2[index]) >> 1;
2741  }
2742  dst -= s->linesize[p];
2743  }
2744  }
2745  }
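/* Streams marked as flipped (via the COM markers parsed in mjpeg_decode_com,
 * e.g. Intel JPEG Library or Metasoft) are stored bottom-up; output them
 * top-down by pointing each plane at its last row and negating the stride. */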
2746  if (s->flipped && !s->rgb) {
2747  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);
2748  if (ret)
2749  return ret;
2750 
2751  av_assert0(s->nb_components == av_pix_fmt_count_planes(frame->format));
2752  for (index=0; index<s->nb_components; index++) {
2753  int h = frame->height;
2754  if (index && index < 3)
2755  h = AV_CEIL_RSHIFT(h, vshift);
2756  if (frame->data[index]) {
2757  frame->data[index] += (h - 1) * frame->linesize[index];
2758  frame->linesize[index] *= -1;
2759  }
2760  }
2761  }
2762 
2763  if (avctx->pix_fmt == AV_PIX_FMT_GBRP) {
2764  av_assert0(s->nb_components == 3);
2765  FFSWAP(uint8_t *, frame->data[0], frame->data[2]);
2766  FFSWAP(uint8_t *, frame->data[0], frame->data[1]);
2767  FFSWAP(int, frame->linesize[0], frame->linesize[2]);
2768  FFSWAP(int, frame->linesize[0], frame->linesize[1]);
2769  }
2770 
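/* Four-component data with Adobe transform 0 is CMYK-style: multiply each
 * colour plane by the K plane (the *257 >> 16 step approximates /255),
 * reorder the planes to the GBRAP layout and force alpha to opaque. */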
2771  if (s->adobe_transform == 0 && avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2772  int w = s->picture_ptr->width;
2773  int h = s->picture_ptr->height;
2774  av_assert0(s->nb_components == 4);
2775  for (int i = 0; i < h; i++) {
2776  int j;
2777  uint8_t *dst[4];
2778  for (index=0; index<4; index++) {
2779  dst[index] = s->picture_ptr->data[index]
2780  + s->picture_ptr->linesize[index]*i;
2781  }
2782  for (j=0; j<w; j++) {
2783  int k = dst[3][j];
2784  int r = dst[0][j] * k;
2785  int g = dst[1][j] * k;
2786  int b = dst[2][j] * k;
2787  dst[0][j] = g*257 >> 16;
2788  dst[1][j] = b*257 >> 16;
2789  dst[2][j] = r*257 >> 16;
2790  }
2791  memset(dst[3], 255, w);
2792  }
2793  }
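/* Four-component data with Adobe transform 2 is YCCK-style: the luma plane is
 * inverted and scaled by the K plane, the chroma planes are scaled by K around
 * their 128 centre, and alpha is forced to opaque. */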
2794  if (s->adobe_transform == 2 && avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2795  int w = s->picture_ptr->width;
2796  int h = s->picture_ptr->height;
2797  av_assert0(s->nb_components == 4);
2798  for (int i = 0; i < h; i++) {
2799  int j;
2800  uint8_t *dst[4];
2801  for (index=0; index<4; index++) {
2802  dst[index] = s->picture_ptr->data[index]
2803  + s->picture_ptr->linesize[index]*i;
2804  }
2805  for (j=0; j<w; j++) {
2806  int k = dst[3][j];
2807  int r = (255 - dst[0][j]) * k;
2808  int g = (128 - dst[1][j]) * k;
2809  int b = (128 - dst[2][j]) * k;
2810  dst[0][j] = r*257 >> 16;
2811  dst[1][j] = (g*257 >> 16) + 128;
2812  dst[2][j] = (b*257 >> 16) + 128;
2813  }
2814  memset(dst[3], 255, w);
2815  }
2816  }
2817 
2818  if (s->stereo3d) {
2819  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2820  if (stereo) {
2821  stereo->type = s->stereo3d->type;
2822  stereo->flags = s->stereo3d->flags;
2823  }
2824  av_freep(&s->stereo3d);
2825  }
2826 
2827  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2828  AVFrameSideData *sd;
2829  size_t offset = 0;
2830  int total_size = 0;
2831 
2832  /* Sum size of all parts. */
2833  for (int i = 0; i < s->iccnum; i++)
2834  total_size += s->iccentries[i].length;
2835 
2836  ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE, total_size, &sd);
2837  if (ret < 0) {
2838  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2839  return ret;
2840  }
2841 
2842  if (sd) {
2843  /* Reassemble the parts, which are now in-order. */
2844  for (int i = 0; i < s->iccnum; i++) {
2845  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2846  offset += s->iccentries[i].length;
2847  }
2848  }
2849  }
2850 
2851  if (s->exif_metadata.entries) {
2852  ret = ff_decode_exif_attach_ifd(avctx, frame, &s->exif_metadata);
2853  av_exif_free(&s->exif_metadata);
2854  if (ret < 0)
2855  av_log(avctx, AV_LOG_WARNING, "couldn't attach EXIF metadata\n");
2856  }
2857 
2858  if (avctx->codec_id != AV_CODEC_ID_SMVJPEG &&
2859  (avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2860  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2861  avctx->coded_height > s->orig_height) {
2862  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2863  frame->crop_top = frame->height - avctx->height;
2864  }
2865 
2866 the_end_no_picture:
2867  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %td bytes\n",
2868  buf_end - buf_ptr);
2869  return buf_ptr - buf;
2870 }
2871 
2872 int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame,
2873  AVPacket *avpkt)
2874 {
2875  return ff_mjpeg_decode_frame_from_buf(avctx, frame, got_frame,
2876  avpkt, avpkt->data, avpkt->size);
2877 }
2878 
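/* Illustrative usage sketch (not part of FFmpeg itself): applications drive
 * this decoder through the generic libavcodec API rather than by calling
 * ff_mjpeg_decode_frame() directly. Error handling is omitted and "pkt" is
 * assumed to be an AVPacket obtained elsewhere (e.g. from av_read_frame()).
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *     AVFrame *frm         = av_frame_alloc();
 *
 *     avcodec_open2(ctx, codec, NULL);
 *     avcodec_send_packet(ctx, pkt);        // one JPEG image per packet
 *     while (avcodec_receive_frame(ctx, frm) == 0) {
 *         // frm->data[] / frm->linesize[] now hold the decoded picture
 *         av_frame_unref(frm);
 *     }
 */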
2879 
2880 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2881  * even without having called ff_mjpeg_decode_init(). */
2882 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2883 {
2884  MJpegDecodeContext *s = avctx->priv_data;
2885  int i, j;
2886 
2887  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_num) {
2888  av_log(avctx, AV_LOG_INFO, "Single field\n");
2889  }
2890 
2891  av_frame_free(&s->picture);
2892  s->picture_ptr = NULL;
2893 
2894  av_frame_free(&s->smv_frame);
2895 
2896  av_freep(&s->buffer);
2897  av_freep(&s->stereo3d);
2898  av_freep(&s->ljpeg_buffer);
2899  s->ljpeg_buffer_size = 0;
2900 
2901  for (i = 0; i < 3; i++) {
2902  for (j = 0; j < 4; j++)
2903  ff_vlc_free(&s->vlcs[i][j]);
2904  }
2905  for (i = 0; i < MAX_COMPONENTS; i++) {
2906  av_freep(&s->blocks[i]);
2907  av_freep(&s->last_nnz[i]);
2908  }
2909  av_exif_free(&s->exif_metadata);
2910 
2911  reset_icc_profile(s);
2912 
2913  av_freep(&s->hwaccel_picture_private);
2914  av_freep(&s->jls_state);
2915 
2916  return 0;
2917 }
2918 
2919 static void decode_flush(AVCodecContext *avctx)
2920 {
2921  MJpegDecodeContext *s = avctx->priv_data;
2922  s->got_picture = 0;
2923 
2924  s->smv_next_frame = 0;
2925  av_frame_unref(s->smv_frame);
2926 }
2927 
2928 #if CONFIG_MJPEG_DECODER
2929 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2930 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2931 static const AVOption options[] = {
2932  { "extern_huff", "Use external huffman table.",
2933  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2934  { NULL },
2935 };
2936 
2937 static const AVClass mjpegdec_class = {
2938  .class_name = "MJPEG decoder",
2939  .item_name = av_default_item_name,
2940  .option = options,
2941  .version = LIBAVUTIL_VERSION_INT,
2942 };
2943 
2944 const FFCodec ff_mjpeg_decoder = {
2945  .p.name = "mjpeg",
2946  CODEC_LONG_NAME("MJPEG (Motion JPEG)"),
2947  .p.type = AVMEDIA_TYPE_VIDEO,
2948  .p.id = AV_CODEC_ID_MJPEG,
2949  .priv_data_size = sizeof(MJpegDecodeContext),
2950  .init = ff_mjpeg_decode_init,
2951  .close = ff_mjpeg_decode_end,
2952  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2953  .flush = decode_flush,
2954  .p.capabilities = AV_CODEC_CAP_DR1,
2955  .p.max_lowres = 3,
2956  .p.priv_class = &mjpegdec_class,
2957  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
2958  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
2959  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM |
2960  FF_CODEC_CAP_ICC_PROFILES,
2961  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2962 #if CONFIG_MJPEG_NVDEC_HWACCEL
2963  HWACCEL_NVDEC(mjpeg),
2964 #endif
2965 #if CONFIG_MJPEG_VAAPI_HWACCEL
2966  HWACCEL_VAAPI(mjpeg),
2967 #endif
2968  NULL
2969  },
2970 };
2971 #endif
2972 #if CONFIG_THP_DECODER
2973 const FFCodec ff_thp_decoder = {
2974  .p.name = "thp",
2975  CODEC_LONG_NAME("Nintendo Gamecube THP video"),
2976  .p.type = AVMEDIA_TYPE_VIDEO,
2977  .p.id = AV_CODEC_ID_THP,
2978  .priv_data_size = sizeof(MJpegDecodeContext),
2979  .init = ff_mjpeg_decode_init,
2980  .close = ff_mjpeg_decode_end,
2981  FF_CODEC_DECODE_CB(ff_mjpeg_decode_frame),
2982  .flush = decode_flush,
2983  .p.capabilities = AV_CODEC_CAP_DR1,
2984  .p.max_lowres = 3,
2985  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2986 };
2987 #endif
2988 
2989 #if CONFIG_SMVJPEG_DECODER
2990 // SMV JPEG just stacks several output frames into one JPEG picture
2991 // we handle that by setting up the cropping parameters appropriately
2992 static void smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2993 {
2994  MJpegDecodeContext *s = avctx->priv_data;
2995 
2996  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2997 
2998  frame->width = avctx->coded_width;
2999  frame->height = avctx->coded_height;
3000  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
3001  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
3002 
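/* Each stacked sub-picture advances the timestamp by one (already divided)
 * frame duration; after the last crop of the JPEG has been handed out, the
 * cached frame is released so the next packet gets decoded. */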
3003  if (s->smv_frame->pts != AV_NOPTS_VALUE)
3004  s->smv_frame->pts += s->smv_frame->duration;
3005  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
3006 
3007  if (s->smv_next_frame == 0)
3008  av_frame_unref(s->smv_frame);
3009 }
3010 
3011 static int smvjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
3012 {
3013  MJpegDecodeContext *s = avctx->priv_data;
3014  AVPacket *const pkt = avctx->internal->in_pkt;
3015  int got_frame = 0;
3016  int ret;
3017 
3018  if (s->smv_next_frame > 0)
3019  goto return_frame;
3020 
3021  ret = ff_decode_get_packet(avctx, pkt);
3022  if (ret < 0)
3023  return ret;
3024 
3025  av_frame_unref(s->smv_frame);
3026 
3027  ret = ff_mjpeg_decode_frame(avctx, s->smv_frame, &got_frame, pkt);
3028  s->smv_frame->pkt_dts = pkt->dts;
3029  av_packet_unref(pkt);
3030  if (ret < 0)
3031  return ret;
3032 
3033  if (!got_frame)
3034  return AVERROR(EAGAIN);
3035 
3036  // packet duration covers all the frames in the packet
3037  s->smv_frame->duration /= s->smv_frames_per_jpeg;
3038 
3039 return_frame:
3040  av_assert0(s->smv_frame->buf[0]);
3041  ret = av_frame_ref(frame, s->smv_frame);
3042  if (ret < 0)
3043  return ret;
3044 
3045  smv_process_frame(avctx, frame);
3046  return 0;
3047 }
3048 
3049 const FFCodec ff_smvjpeg_decoder = {
3050  .p.name = "smvjpeg",
3051  CODEC_LONG_NAME("SMV JPEG"),
3052  .p.type = AVMEDIA_TYPE_VIDEO,
3053  .p.id = AV_CODEC_ID_SMVJPEG,
3054  .priv_data_size = sizeof(MJpegDecodeContext),
3055  .init = ff_mjpeg_decode_init,
3056  .close = ff_mjpeg_decode_end,
3057  FF_CODEC_RECEIVE_FRAME_CB(smvjpeg_receive_frame),
3058  .flush = decode_flush,
3059  .p.capabilities = AV_CODEC_CAP_DR1,
3060  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3061  FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_CLEANUP,
3062 };
3063 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
flags
const SwsFlags flags[]
Definition: swscale.c:61
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1413
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:280
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:249
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:208
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:498
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:360
opt.h
av_exif_parse_buffer
int av_exif_parse_buffer(void *logctx, const uint8_t *buf, size_t size, AVExifMetadata *ifd, enum AVExifHeaderMode header_mode)
Decodes the EXIF data provided in the buffer and writes it into the struct *ifd.
Definition: exif.c:860
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1208
out
FILE * out
Definition: movenc.c:55
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1428
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
APP1
@ APP1
Definition: mjpeg.h:80
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:980
SOF0
@ SOF0
Definition: mjpeg.h:39
src1
const pixel * src1
Definition: h264pred_template.c:420
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1406
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:573
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:424
ff_smvjpeg_decoder
const FFCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:260
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:112
mask
int mask
Definition: mediacodecdec_common.c:154
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:230
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:595
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:588
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:690
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:42
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:149
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:40
ff_mjpeg_val_dc
const uint8_t ff_mjpeg_val_dc[]
Definition: jpegtabs.h:34
FFCodec
Definition: codec_internal.h:127
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:213
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_mjpeg_bits_ac_chrominance
const uint8_t ff_mjpeg_bits_ac_chrominance[]
Definition: jpegtabs.h:66
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:152
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:517
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1383
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:655
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:251
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
ff_permute_scantable
av_cold void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64], const uint8_t permutation[64])
Definition: idctdsp.c:30
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:64
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
rgb
Definition: rpzaenc.c:60
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:237
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1271
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1444
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
FFHWAccel
Definition: hwaccel_internal.h:34
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:448
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:121
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1670
fail
#define fail()
Definition: checkasm.h:214
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:52
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:597
SOF3
@ SOF3
Definition: mjpeg.h:42
GetBitContext
Definition: get_bits.h:109
ff_mjpeg_decode_frame_from_buf
int ff_mjpeg_decode_frame_from_buf(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *avpkt, const uint8_t *buf, const int buf_size)
Definition: mjpegdec.c:2355
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2154
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:58
av_exif_free
void av_exif_free(AVExifMetadata *ifd)
Frees all resources associated with the given EXIF metadata struct.
Definition: exif.c:650
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:615
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:522
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:283
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:191
aligned
static int aligned(int val)
Definition: dashdec.c:171
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:883
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:551
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1646
AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: defs.h:173
COM
@ COM
Definition: mjpeg.h:111
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: defs.h:212
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1063
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:189
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:523
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:126
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:108
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:552
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
g
const char * g
Definition: vf_curves.c:128
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:355
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:550
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2341
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2882
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
PutBitContext
Definition: put_bits.h:50
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:212
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:561
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:529
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_decode_exif_attach_ifd
int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd)
Definition: decode.c:2430
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
av_clip_int16
#define av_clip_int16
Definition: common.h:115
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:530
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1635
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:207
AV_EXIF_TIFF_HEADER
@ AV_EXIF_TIFF_HEADER
The TIFF header starts with 0x49492a00, or 0x4d4d002a.
Definition: exif.h:63
hwaccel_internal.h
AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: defs.h:174
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:194
SOF13
@ SOF13
Definition: mjpeg.h:52
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:474
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index, int *val)
Definition: mjpegdec.c:821
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:242
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
profiles.h
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
options
Definition: swscale.c:43
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:235
MJpegDecodeContext
Definition: mjpegdec.h:55
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1459
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:998
lowres
static int lowres
Definition: ffplay.c:330
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1580
ff_mjpeg_val_ac_chrominance
const uint8_t ff_mjpeg_val_ac_chrominance[]
Definition: jpegtabs.h:69
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:651
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:101
DRI
@ DRI
Definition: mjpeg.h:75
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1705
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1729
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1098
ff_mjpeg_val_ac_luminance
const uint8_t ff_mjpeg_val_ac_luminance[]
Definition: jpegtabs.h:42
AVPacket::size
int size
Definition: packet.h:589
dc
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
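A small sketch of the common decoder pattern this enables: the caller gets its own reference to an internally cached picture instead of a copy of the pixel data.

#include "libavutil/frame.h"

/* Hand out a new reference to a cached frame; no pixel data is copied,
 * only buffer reference counts change. */
static int output_cached_frame(AVFrame *dst, const AVFrame *cached)
{
    return av_frame_ref(dst, cached);
}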
codec_internal.h
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2872
av_bswap32
#define av_bswap32
Definition: bswap.h:47
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:900
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
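Typical use of av_err2str(): expand an AVERROR code into readable text directly inside a log call (the macro's buffer only lives for the duration of the statement).

#include "libavutil/error.h"
#include "libavutil/log.h"

static void log_decode_error(void *logctx, int err)
{
    /* the string returned by av_err2str() must be consumed here, not stored */
    av_log(logctx, AV_LOG_ERROR, "decoding failed: %s\n", av_err2str(err));
}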
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1670
AV_PROFILE_MJPEG_JPEG_LS
#define AV_PROFILE_MJPEG_JPEG_LS
Definition: defs.h:177
ff_mjpeg_bits_ac_luminance
const uint8_t ff_mjpeg_bits_ac_luminance[]
Definition: jpegtabs.h:40
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:268
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2127
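A hedged sketch of attaching an ICC profile (e.g. assembled from APP2 segments) as frame side data; icc_data and icc_len are hypothetical names, and the NULL check reflects the "rejected if already provided by the demuxer" behaviour described above.

#include <string.h>
#include "libavcodec/decode.h"
#include "libavutil/frame.h"

static int attach_icc_profile(AVCodecContext *avctx, AVFrame *frame,
                              const uint8_t *icc_data, size_t icc_len)
{
    AVFrameSideData *sd;
    int ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE,
                                     icc_len, &sd);
    if (ret < 0)
        return ret;
    if (sd)                     /* NULL when the side data was not added */
        memcpy(sd->data, icc_data, icc_len);
    return 0;
}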
AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
SOF15
@ SOF15
Definition: mjpeg.h:54
AVCodecHWConfigInternal
Definition: hwconfig.h:25
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:177
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:174
offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:294
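Sketch of the standard JPEG DC-difference scheme this helper serves: a Huffman symbol gives the bit length of the difference, then get_xbits() reads that many bits as a signed value; size_category stands in for the result of the Huffman lookup.

#include "libavcodec/get_bits.h"

static int read_dc_difference(GetBitContext *gb, int size_category)
{
    if (size_category == 0)
        return 0;                        /* zero difference, no extra bits */
    return get_xbits(gb, size_category); /* sign bit + mantissa, no MSB */
}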
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:170
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2194
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:194
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
ff_thp_decoder
const FFCodec ff_thp_decoder
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:180
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2334
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:836
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:522
AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: defs.h:176
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
VD
#define VD
Definition: amfdec.c:665
src2
const pixel * src2
Definition: h264pred_template.c:421
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: defs.h:215
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:76
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1835
AVCodecInternal::in_pkt
AVPacket * in_pkt
This packet is used to hold the packet given to decoders implementing the .decode API; it is unused b...
Definition: internal.h:83
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has an additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
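Illustrative reuse pattern: a scratch buffer and its allocated size live in some context and are only grown when a larger payload arrives; the zeroed padding makes over-reads during bitstream parsing safe. Names here are placeholders.

#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int ensure_scratch(uint8_t **buf, unsigned int *buf_size, size_t needed)
{
    av_fast_padded_malloc(buf, buf_size, needed);  /* reallocates only if too small */
    return *buf ? 0 : AVERROR(ENOMEM);             /* *buf is NULL on allocation failure */
}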
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:63
decode_flush
static av_cold void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2919
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1390
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:107
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:684
len
int len
Definition: vorbis_enc_data.h:426
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:600
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:1886
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:972
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:35
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
Definition: filter_design.txt:265
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:207
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1387
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
id
enum AVCodecID id
Definition: dts2pts.c:549
left
Definition: snow.txt:386
AV_RL32
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2221
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
OFFSET
The byte offset of the option's field within the private context structure; see the OFFSET() macro.
AVCodecContext
main external API structure.
Definition: avcodec.h:439
FF_CODEC_RECEIVE_FRAME_CB
#define FF_CODEC_RECEIVE_FRAME_CB(func)
Definition: codec_internal.h:355
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:247
buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
APP2
@ APP2
Definition: mjpeg.h:81
FF_HW_CALL
#define FF_HW_CALL(avctx, function,...)
Definition: hwaccel_internal.h:173
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1626
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
values
Definition: filter_design.txt:264
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_mjpeg_bits_dc_chrominance
const uint8_t ff_mjpeg_bits_dc_chrominance[]
Definition: jpegtabs.h:37
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1382
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:298
APP0
@ APP0
Definition: mjpeg.h:79
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:615
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1847
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:54
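A hedged sketch of tagging a decoded frame as a stereoscopic top/bottom pair with inverted view order, the kind of metadata a JPS-style stream can carry; the chosen type and flag are just examples.

#include "libavutil/error.h"
#include "libavutil/stereo3d.h"

static int tag_top_bottom_stereo(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;
    stereo->flags = AV_STEREO3D_FLAG_INVERT;  /* right/bottom holds the left view */
    return 0;
}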
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
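Small sketch of a PutBitContext round trip: write a few bit fields, then flush so the last partial byte is zero-padded before the buffer is handed on. The field widths and values are arbitrary.

#include <stdint.h>
#include "libavcodec/put_bits.h"

/* Returns the number of bytes written into buf. */
static int write_two_fields(uint8_t *buf, int buf_size)
{
    PutBitContext pb;
    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 4, 0x7);            /* a 4-bit field  */
    put_bits(&pb, 12, 0x123);         /* a 12-bit field */
    flush_put_bits(&pb);              /* zero-pad to the next byte boundary */
    return put_bits_count(&pb) >> 3;  /* now byte-aligned */
}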
SOF1
@ SOF1
Definition: mjpeg.h:40
w
uint8_t w
Definition: llvidencdsp.c:39
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:464
ff_mjpeg_bits_dc_luminance
const uint8_t ff_mjpeg_bits_dc_luminance[]
Definition: jpegtabs.h:32
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
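A hedged sketch of building the default DC-luminance Huffman table from the standard bit-length/value arrays and releasing it again; the header paths and the ff_mjpeg_val_dc name are assumptions about where these internal symbols live.

#include "libavcodec/jpegtables.h"
#include "libavcodec/mjpegdec.h"
#include "libavcodec/vlc.h"

static int build_default_dc_luma(VLC *vlc, void *logctx)
{
    /* is_ac = 0: DC table (16 code-length counts plus up to 12 values) */
    int ret = ff_mjpeg_build_vlc(vlc, ff_mjpeg_bits_dc_luminance,
                                 ff_mjpeg_val_dc, 0, logctx);
    if (ret < 0)
        return ret;
    /* ... symbols can now be read with get_vlc2(gb, vlc->table, 9, 2) ... */
    ff_vlc_free(vlc);
    return 0;
}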
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1645
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:46
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
SOF7
@ SOF7
Definition: mjpeg.h:46
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
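Illustrative sketch: validate dimensions parsed from a frame header before committing them to the codec context (real decoders usually go through a helper such as ff_set_dimensions, so treat this as a simplification).

#include "libavcodec/avcodec.h"
#include "libavutil/imgutils.h"

static int apply_parsed_dimensions(AVCodecContext *avctx, int w, int h)
{
    int ret = av_image_check_size(w, h, 0, avctx);  /* reject unaddressable sizes */
    if (ret < 0)
        return ret;
    avctx->width  = w;
    avctx->height = h;
    return 0;
}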
width
#define width
Definition: dsp.h:89
AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define AV_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: defs.h:175
AV_RB24
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
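For reference, the seven standard lossless-JPEG predictors that a PREDICT-style selection chooses between, written out as a plain function; this follows the JPEG specification and is an assumption, not a copy of the macro body.

/* left, top, topleft are the reconstructed neighbours of the current sample;
 * predictor is the selection value 1..7 from the scan header. */
static int lossless_jpeg_predict(int left, int top, int topleft, int predictor)
{
    switch (predictor) {
    case 1:  return left;
    case 2:  return top;
    case 3:  return topleft;
    case 4:  return left + top - topleft;
    case 5:  return left + ((top - topleft) >> 1);
    case 6:  return top  + ((left - topleft) >> 1);
    case 7:
    default: return (left + top) >> 1;
    }
}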
put_bits.h
return_frame
static int return_frame(AVFilterContext *ctx, int is_second)
Definition: yadif_common.c:28
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:663
SOF6
@ SOF6
Definition: mjpeg.h:45
skip
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
Definition: bitstream_template.h:383
src
#define src
Definition: vp8dsp.c:248
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:347
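Combined illustration of MKTAG and av_fourcc2str(): branch on a known container fourcc and print an unknown one in readable form ('MJPG' is only an example tag).

#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"
#include "libavutil/log.h"

static void report_codec_tag(AVCodecContext *avctx)
{
    if (avctx->codec_tag == MKTAG('M', 'J', 'P', 'G'))
        av_log(avctx, AV_LOG_INFO, "plain MJPEG fourcc\n");
    else
        av_log(avctx, AV_LOG_INFO, "codec_tag: %s\n",
               av_fourcc2str(avctx->codec_tag));
}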