mjpegdec.c
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "decode.h"
40 #include "hwconfig.h"
41 #include "idctdsp.h"
42 #include "internal.h"
43 #include "jpegtables.h"
44 #include "mjpeg.h"
45 #include "mjpegdec.h"
46 #include "jpeglsdec.h"
47 #include "profiles.h"
48 #include "put_bits.h"
49 #include "tiff.h"
50 #include "exif.h"
51 #include "bytestream.h"
52 
53 
54 static int init_default_huffman_tables(MJpegDecodeContext *s)
55 {
56  static const struct {
57  int class;
58  int index;
59  const uint8_t *bits;
60  const uint8_t *values;
61  int length;
62  } ht[] = {
63  { 0, 0, avpriv_mjpeg_bits_dc_luminance,
64          avpriv_mjpeg_val_dc, 12 },
65  { 0, 1, avpriv_mjpeg_bits_dc_chrominance,
66          avpriv_mjpeg_val_dc, 12 },
67  { 1, 0, avpriv_mjpeg_bits_ac_luminance,
68          avpriv_mjpeg_val_ac_luminance, 251 },
69  { 1, 1, avpriv_mjpeg_bits_ac_chrominance,
70          avpriv_mjpeg_val_ac_chrominance, 251 },
71  { 2, 0, avpriv_mjpeg_bits_ac_luminance,
72          avpriv_mjpeg_val_ac_luminance, 251 },
73  { 2, 1, avpriv_mjpeg_bits_ac_chrominance,
74          avpriv_mjpeg_val_ac_chrominance, 251 },
75  };
76  int i, ret;
77 
78  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
79  ff_free_vlc(&s->vlcs[ht[i].class][ht[i].index]);
80  ret = ff_mjpeg_build_vlc(&s->vlcs[ht[i].class][ht[i].index],
81  ht[i].bits, ht[i].values,
82  ht[i].class == 1, s->avctx);
83  if (ret < 0)
84  return ret;
85 
86  if (ht[i].class < 2) {
87  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
88  ht[i].bits + 1, 16);
89  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
90  ht[i].values, ht[i].length);
91  }
92  }
93 
94  return 0;
95 }
96 
97 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
98 {
99  s->buggy_avid = 1;
100  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
101  s->interlace_polarity = 1;
102  if (len > 14 && buf[12] == 2) /* 2 - PAL */
103  s->interlace_polarity = 0;
104  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
105  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
106 }
107 
108 static void init_idct(AVCodecContext *avctx)
109 {
110  MJpegDecodeContext *s = avctx->priv_data;
111 
112  ff_idctdsp_init(&s->idsp, avctx);
113  ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
114  ff_zigzag_direct);
115 }
116 
117 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
118 {
119  MJpegDecodeContext *s = avctx->priv_data;
120  int ret;
121 
122  if (!s->picture_ptr) {
123  s->picture = av_frame_alloc();
124  if (!s->picture)
125  return AVERROR(ENOMEM);
126  s->picture_ptr = s->picture;
127  }
128 
129  s->pkt = av_packet_alloc();
130  if (!s->pkt)
131  return AVERROR(ENOMEM);
132 
133  s->avctx = avctx;
134  ff_blockdsp_init(&s->bdsp, avctx);
135  ff_hpeldsp_init(&s->hdsp, avctx->flags);
136  init_idct(avctx);
137  s->buffer_size = 0;
138  s->buffer = NULL;
139  s->start_code = -1;
140  s->first_picture = 1;
141  s->got_picture = 0;
142  s->orig_height = avctx->coded_height;
143  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
144  avctx->colorspace = AVCOL_SPC_BT470BG;
145  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
146 
147  if ((ret = init_default_huffman_tables(s)) < 0)
148  return ret;
149 
150  if (s->extern_huff) {
151  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
152  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
153  return ret;
154  if (ff_mjpeg_decode_dht(s)) {
155  av_log(avctx, AV_LOG_ERROR,
156  "error using external huffman table, switching back to internal\n");
157  if ((ret = init_default_huffman_tables(s)) < 0)
158  return ret;
159  }
160  }
161  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
162  s->interlace_polarity = 1; /* bottom field first */
163  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
164  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
165  if (avctx->codec_tag == AV_RL32("MJPG"))
166  s->interlace_polarity = 1;
167  }
168 
169  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
170  if (avctx->extradata_size >= 4)
171  s->smv_frames_per_jpeg = AV_RL32(avctx->extradata);
172 
173  if (s->smv_frames_per_jpeg <= 0) {
174  av_log(avctx, AV_LOG_ERROR, "Invalid number of frames per jpeg.\n");
175  return AVERROR_INVALIDDATA;
176  }
177 
178  s->smv_frame = av_frame_alloc();
179  if (!s->smv_frame)
180  return AVERROR(ENOMEM);
181  } else if (avctx->extradata_size > 8
182  && AV_RL32(avctx->extradata) == 0x2C
183  && AV_RL32(avctx->extradata+4) == 0x18) {
184  parse_avid(s, avctx->extradata, avctx->extradata_size);
185  }
186 
187  if (avctx->codec->id == AV_CODEC_ID_AMV)
188  s->flipped = 1;
189 
190  return 0;
191 }
192 
193 
194 /* quantization tables */
195 int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
196 {
197  int len, index, i;
198 
199  len = get_bits(&s->gb, 16) - 2;
200 
201  if (8*len > get_bits_left(&s->gb)) {
202  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
203  return AVERROR_INVALIDDATA;
204  }
205 
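 /* Each DQT table occupies 65 bytes (8-bit precision) or 129 bytes (16-bit):
  * one precision/index byte followed by 64 one- or two-byte values, which is
  * what the len >= 65 loop condition and the len decrement below account for. */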
206  while (len >= 65) {
207  int pr = get_bits(&s->gb, 4);
208  if (pr > 1) {
209  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
210  return AVERROR_INVALIDDATA;
211  }
212  index = get_bits(&s->gb, 4);
213  if (index >= 4)
214  return -1;
215  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
216  /* read quant table */
217  for (i = 0; i < 64; i++) {
218  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
219  if (s->quant_matrixes[index][i] == 0) {
220  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
221  return AVERROR_INVALIDDATA;
222  }
223  }
224 
225  // XXX FIXME fine-tune, and perhaps add dc too
226  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
227  s->quant_matrixes[index][8]) >> 1;
228  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
229  index, s->qscale[index]);
230  len -= 1 + 64 * (1+pr);
231  }
232  return 0;
233 }
234 
235 /* decode huffman tables and build VLC decoders */
236 int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
237 {
238  int len, index, i, class, n, v;
239  uint8_t bits_table[17];
240  uint8_t val_table[256];
241  int ret = 0;
242 
243  len = get_bits(&s->gb, 16) - 2;
244 
245  if (8*len > get_bits_left(&s->gb)) {
246  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
247  return AVERROR_INVALIDDATA;
248  }
249 
250  while (len > 0) {
251  if (len < 17)
252  return AVERROR_INVALIDDATA;
253  class = get_bits(&s->gb, 4);
254  if (class >= 2)
255  return AVERROR_INVALIDDATA;
256  index = get_bits(&s->gb, 4);
257  if (index >= 4)
258  return AVERROR_INVALIDDATA;
259  n = 0;
260  for (i = 1; i <= 16; i++) {
261  bits_table[i] = get_bits(&s->gb, 8);
262  n += bits_table[i];
263  }
264  len -= 17;
265  if (len < n || n > 256)
266  return AVERROR_INVALIDDATA;
267 
268  for (i = 0; i < n; i++) {
269  v = get_bits(&s->gb, 8);
270  val_table[i] = v;
271  }
272  len -= n;
273 
274  /* build VLC and flush previous vlc if present */
275  ff_free_vlc(&s->vlcs[class][index]);
276  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
277  class, index, n);
278  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[class][index], bits_table,
279  val_table, class > 0, s->avctx)) < 0)
280  return ret;
281 
282  if (class > 0) {
283  ff_free_vlc(&s->vlcs[2][index]);
284  if ((ret = ff_mjpeg_build_vlc(&s->vlcs[2][index], bits_table,
285  val_table, 0, s->avctx)) < 0)
286  return ret;
287  }
288 
289  for (i = 0; i < 16; i++)
290  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
291  for (i = 0; i < 256; i++)
292  s->raw_huffman_values[class][index][i] = val_table[i];
293  }
294  return 0;
295 }
296 
297 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
298 {
299  int len, nb_components, i, width, height, bits, ret, size_change;
300  unsigned pix_fmt_id;
301  int h_count[MAX_COMPONENTS] = { 0 };
302  int v_count[MAX_COMPONENTS] = { 0 };
303 
304  s->cur_scan = 0;
305  memset(s->upscale_h, 0, sizeof(s->upscale_h));
306  memset(s->upscale_v, 0, sizeof(s->upscale_v));
307 
308  len = get_bits(&s->gb, 16);
309  bits = get_bits(&s->gb, 8);
310 
311  if (bits > 16 || bits < 1) {
312  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
313  return AVERROR_INVALIDDATA;
314  }
315 
316  if (s->avctx->bits_per_raw_sample != bits) {
317  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
318  s->avctx->bits_per_raw_sample = bits;
319  init_idct(s->avctx);
320  }
321  if (s->pegasus_rct)
322  bits = 9;
323  if (bits == 9 && !s->pegasus_rct)
324  s->rct = 1; // FIXME ugly
325 
326  if(s->lossless && s->avctx->lowres){
327  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
328  return -1;
329  }
330 
331  height = get_bits(&s->gb, 16);
332  width = get_bits(&s->gb, 16);
333 
334  // HACK for odd_height.mov
335  if (s->interlaced && s->width == width && s->height == height + 1)
336  height= s->height;
337 
338  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
339  if (av_image_check_size(width, height, 0, s->avctx) < 0)
340  return AVERROR_INVALIDDATA;
341  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
342  return AVERROR_INVALIDDATA;
343 
344  nb_components = get_bits(&s->gb, 8);
345  if (nb_components <= 0 ||
346  nb_components > MAX_COMPONENTS)
347  return -1;
348  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
349  if (nb_components != s->nb_components) {
350  av_log(s->avctx, AV_LOG_ERROR,
351  "nb_components changing in interlaced picture\n");
352  return AVERROR_INVALIDDATA;
353  }
354  }
355  if (s->ls && !(bits <= 8 || nb_components == 1)) {
356  avpriv_report_missing_feature(s->avctx,
357  "JPEG-LS that is not <= 8 "
358  "bits/component or 16-bit gray");
359  return AVERROR_PATCHWELCOME;
360  }
361  if (len != 8 + 3 * nb_components) {
362  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
363  return AVERROR_INVALIDDATA;
364  }
365 
366  s->nb_components = nb_components;
367  s->h_max = 1;
368  s->v_max = 1;
369  for (i = 0; i < nb_components; i++) {
370  /* component id */
371  s->component_id[i] = get_bits(&s->gb, 8) - 1;
372  h_count[i] = get_bits(&s->gb, 4);
373  v_count[i] = get_bits(&s->gb, 4);
374  /* compute hmax and vmax (only used in interleaved case) */
375  if (h_count[i] > s->h_max)
376  s->h_max = h_count[i];
377  if (v_count[i] > s->v_max)
378  s->v_max = v_count[i];
379  s->quant_index[i] = get_bits(&s->gb, 8);
380  if (s->quant_index[i] >= 4) {
381  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
382  return AVERROR_INVALIDDATA;
383  }
384  if (!h_count[i] || !v_count[i]) {
385  av_log(s->avctx, AV_LOG_ERROR,
386  "Invalid sampling factor in component %d %d:%d\n",
387  i, h_count[i], v_count[i]);
388  return AVERROR_INVALIDDATA;
389  }
390 
391  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
392  i, h_count[i], v_count[i],
393  s->component_id[i], s->quant_index[i]);
394  }
395  if ( nb_components == 4
396  && s->component_id[0] == 'C' - 1
397  && s->component_id[1] == 'M' - 1
398  && s->component_id[2] == 'Y' - 1
399  && s->component_id[3] == 'K' - 1)
400  s->adobe_transform = 0;
401 
402  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
403  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
404  return AVERROR_PATCHWELCOME;
405  }
406 
407  if (s->bayer) {
408  if (nb_components == 2) {
409  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
410  width stored in their SOF3 markers is the width of each one. We only output
411  a single component, therefore we need to adjust the output image width. We
412  handle the deinterleaving (but not the debayering) in this file. */
413  width *= 2;
414  }
415  /* They can also contain 1 component, which is double the width and half the height
416  of the final image (rows are interleaved). We don't handle the decoding in this
417  file, but leave that to the TIFF/DNG decoder. */
418  }
419 
420  /* if different size, realloc/alloc picture */
421  if (width != s->width || height != s->height || bits != s->bits ||
422  memcmp(s->h_count, h_count, sizeof(h_count)) ||
423  memcmp(s->v_count, v_count, sizeof(v_count))) {
424  size_change = 1;
425 
426  s->width = width;
427  s->height = height;
428  s->bits = bits;
429  memcpy(s->h_count, h_count, sizeof(h_count));
430  memcpy(s->v_count, v_count, sizeof(v_count));
431  s->interlaced = 0;
432  s->got_picture = 0;
433 
434  /* test interlaced mode */
435  if (s->first_picture &&
436  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
437  s->orig_height != 0 &&
438  s->height < ((s->orig_height * 3) / 4)) {
439  s->interlaced = 1;
440  s->bottom_field = s->interlace_polarity;
441  s->picture_ptr->interlaced_frame = 1;
442  s->picture_ptr->top_field_first = !s->interlace_polarity;
443  height *= 2;
444  }
445 
446  ret = ff_set_dimensions(s->avctx, width, height);
447  if (ret < 0)
448  return ret;
449 
450  if ((s->avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
451  s->avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
452  s->orig_height < height)
453  s->avctx->height = AV_CEIL_RSHIFT(s->orig_height, s->avctx->lowres);
454 
455  s->first_picture = 0;
456  } else {
457  size_change = 0;
458  }
459 
460  if (s->avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
461  s->avctx->height = s->avctx->coded_height / s->smv_frames_per_jpeg;
462  if (s->avctx->height <= 0)
463  return AVERROR_INVALIDDATA;
464  }
465 
466  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
467  if (s->progressive) {
468  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
469  return AVERROR_INVALIDDATA;
470  }
471  } else {
472  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
473  s->rgb = 1;
474  else if (!s->lossless)
475  s->rgb = 0;
476  /* XXX: not complete test ! */
477  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
478  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
479  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
480  (s->h_count[3] << 4) | s->v_count[3];
481  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
482  /* NOTE we do not allocate pictures large enough for the possible
483  * padding of h/v_count being 4 */
484  if (!(pix_fmt_id & 0xD0D0D0D0))
485  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
486  if (!(pix_fmt_id & 0x0D0D0D0D))
487  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
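 /* Example: 4:2:0 sampling (factors 2x2, 1x1, 1x1) packs to 0x22111100.
  * The two adjustments above halve factor sets that contain only 0s and 2s,
  * so e.g. 2x2,2x2,2x2 ends up as 0x11111100, the same id as 1x1,1x1,1x1. */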
488 
489  for (i = 0; i < 8; i++) {
490  int j = 6 + (i&1) - (i&6);
491  int is = (pix_fmt_id >> (4*i)) & 0xF;
492  int js = (pix_fmt_id >> (4*j)) & 0xF;
493 
494  if (is == 1 && js != 2 && (i < 2 || i > 5))
495  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
496  if (is == 1 && js != 2 && (i < 2 || i > 5))
497  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
498 
499  if (is == 1 && js == 2) {
500  if (i & 1) s->upscale_h[j/2] = 1;
501  else s->upscale_v[j/2] = 1;
502  }
503  }
504 
505  if (s->bayer) {
506  if (pix_fmt_id != 0x11110000 && pix_fmt_id != 0x11000000)
507  goto unk_pixfmt;
508  }
509 
510  switch (pix_fmt_id) {
511  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
512  if (!s->bayer)
513  goto unk_pixfmt;
514  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
515  break;
516  case 0x11111100:
517  if (s->rgb)
518  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
519  else {
520  if ( s->adobe_transform == 0
521  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
522  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
523  } else {
524  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
525  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
526  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
527  }
528  }
529  av_assert0(s->nb_components == 3);
530  break;
531  case 0x11111111:
532  if (s->rgb)
533  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
534  else {
535  if (s->adobe_transform == 0 && s->bits <= 8) {
536  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
537  } else {
538  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
539  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
540  }
541  }
542  av_assert0(s->nb_components == 4);
543  break;
544  case 0x22111122:
545  case 0x22111111:
546  if (s->adobe_transform == 0 && s->bits <= 8) {
547  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
548  s->upscale_v[1] = s->upscale_v[2] = 1;
549  s->upscale_h[1] = s->upscale_h[2] = 1;
550  } else if (s->adobe_transform == 2 && s->bits <= 8) {
551  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
552  s->upscale_v[1] = s->upscale_v[2] = 1;
553  s->upscale_h[1] = s->upscale_h[2] = 1;
554  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
555  } else {
556  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
557  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
558  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
559  }
560  av_assert0(s->nb_components == 4);
561  break;
562  case 0x12121100:
563  case 0x22122100:
564  case 0x21211100:
565  case 0x22211200:
566  case 0x22221100:
567  case 0x22112200:
568  case 0x11222200:
569  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
570  else
571  goto unk_pixfmt;
572  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
573  break;
574  case 0x11000000:
575  case 0x13000000:
576  case 0x14000000:
577  case 0x31000000:
578  case 0x33000000:
579  case 0x34000000:
580  case 0x41000000:
581  case 0x43000000:
582  case 0x44000000:
583  if(s->bits <= 8)
584  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
585  else
586  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
587  break;
588  case 0x12111100:
589  case 0x14121200:
590  case 0x14111100:
591  case 0x22211100:
592  case 0x22112100:
593  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
594  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
595  else
596  goto unk_pixfmt;
597  s->upscale_v[0] = s->upscale_v[1] = 1;
598  } else {
599  if (pix_fmt_id == 0x14111100)
600  s->upscale_v[1] = s->upscale_v[2] = 1;
601  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
602  else
603  goto unk_pixfmt;
604  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
605  }
606  break;
607  case 0x21111100:
608  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
609  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
610  else
611  goto unk_pixfmt;
612  s->upscale_h[0] = s->upscale_h[1] = 1;
613  } else {
614  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
615  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
616  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
617  }
618  break;
619  case 0x31111100:
620  if (s->bits > 8)
621  goto unk_pixfmt;
622  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
623  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
624  s->upscale_h[1] = s->upscale_h[2] = 2;
625  break;
626  case 0x22121100:
627  case 0x22111200:
628  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
629  else
630  goto unk_pixfmt;
631  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
632  break;
633  case 0x22111100:
634  case 0x23111100:
635  case 0x42111100:
636  case 0x24111100:
637  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
638  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
639  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
640  if (pix_fmt_id == 0x42111100) {
641  if (s->bits > 8)
642  goto unk_pixfmt;
643  s->upscale_h[1] = s->upscale_h[2] = 1;
644  } else if (pix_fmt_id == 0x24111100) {
645  if (s->bits > 8)
646  goto unk_pixfmt;
647  s->upscale_v[1] = s->upscale_v[2] = 1;
648  } else if (pix_fmt_id == 0x23111100) {
649  if (s->bits > 8)
650  goto unk_pixfmt;
651  s->upscale_v[1] = s->upscale_v[2] = 2;
652  }
653  break;
654  case 0x41111100:
655  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
656  else
657  goto unk_pixfmt;
658  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
659  break;
660  default:
661  unk_pixfmt:
662  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
663  memset(s->upscale_h, 0, sizeof(s->upscale_h));
664  memset(s->upscale_v, 0, sizeof(s->upscale_v));
665  return AVERROR_PATCHWELCOME;
666  }
667  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
668  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
669  return AVERROR_PATCHWELCOME;
670  }
671  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
672  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
673  return AVERROR_PATCHWELCOME;
674  }
675  if (s->ls) {
676  memset(s->upscale_h, 0, sizeof(s->upscale_h));
677  memset(s->upscale_v, 0, sizeof(s->upscale_v));
678  if (s->nb_components == 3) {
679  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
680  } else if (s->nb_components != 1) {
681  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
682  return AVERROR_PATCHWELCOME;
683  } else if (s->palette_index && s->bits <= 8)
684  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
685  else if (s->bits <= 8)
686  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
687  else
688  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
689  }
690 
691  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
692  if (!s->pix_desc) {
693  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
694  return AVERROR_BUG;
695  }
696 
697  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
698  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
699  } else {
700  enum AVPixelFormat pix_fmts[] = {
701 #if CONFIG_MJPEG_NVDEC_HWACCEL
702  AV_PIX_FMT_CUDA,
703 #endif
704 #if CONFIG_MJPEG_VAAPI_HWACCEL
705  AV_PIX_FMT_VAAPI,
706 #endif
707  s->avctx->pix_fmt,
708  AV_PIX_FMT_NONE,
709  };
710  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
711  if (s->hwaccel_pix_fmt < 0)
712  return AVERROR(EINVAL);
713 
714  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
715  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
716  }
717 
718  if (s->avctx->skip_frame == AVDISCARD_ALL) {
719  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
720  s->picture_ptr->key_frame = 1;
721  s->got_picture = 1;
722  return 0;
723  }
724 
725  av_frame_unref(s->picture_ptr);
726  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
727  return -1;
728  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
729  s->picture_ptr->key_frame = 1;
730  s->got_picture = 1;
731 
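 /* For interlaced streams each field is decoded separately, so use twice the
  * frame linesize as the per-field stride; the bottom field is then offset by
  * half of this doubled stride, i.e. one frame line. */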
732  for (i = 0; i < 4; i++)
733  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
734 
735  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
736  s->width, s->height, s->linesize[0], s->linesize[1],
737  s->interlaced, s->avctx->height);
738 
739  }
740 
741  if ((s->rgb && !s->lossless && !s->ls) ||
742  (!s->rgb && s->ls && s->nb_components > 1) ||
743  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
744  ) {
745  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
746  return AVERROR_PATCHWELCOME;
747  }
748 
749  /* Start with a totally blank picture, as progressive JPEG will only add details to it */
750  if (s->progressive) {
751  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
752  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
753  for (i = 0; i < s->nb_components; i++) {
754  int size = bw * bh * s->h_count[i] * s->v_count[i];
755  av_freep(&s->blocks[i]);
756  av_freep(&s->last_nnz[i]);
757  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
758  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
759  if (!s->blocks[i] || !s->last_nnz[i])
760  return AVERROR(ENOMEM);
761  s->block_stride[i] = bw * s->h_count[i];
762  }
763  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
764  }
765 
766  if (s->avctx->hwaccel) {
767  s->hwaccel_picture_private =
768  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
769  if (!s->hwaccel_picture_private)
770  return AVERROR(ENOMEM);
771 
772  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
773  s->raw_image_buffer_size);
774  if (ret < 0)
775  return ret;
776  }
777 
778  return 0;
779 }
780 
781 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
782 {
783  int code;
784  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
785  if (code < 0 || code > 16) {
786  av_log(s->avctx, AV_LOG_WARNING,
787  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
788  0, dc_index, &s->vlcs[0][dc_index]);
789  return 0xfffff;
790  }
791 
792  if (code)
793  return get_xbits(&s->gb, code);
794  else
795  return 0;
796 }
797 
798 /* decode block and dequantize */
799 static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
800  int dc_index, int ac_index, uint16_t *quant_matrix)
801 {
802  int code, i, j, level, val;
803 
804  /* DC coef */
805  val = mjpeg_decode_dc(s, dc_index);
806  if (val == 0xfffff) {
807  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
808  return AVERROR_INVALIDDATA;
809  }
810  val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
811  val = av_clip_int16(val);
812  s->last_dc[component] = val;
813  block[0] = val;
814  /* AC coefs */
815  i = 0;
816  {OPEN_READER(re, &s->gb);
817  do {
818  UPDATE_CACHE(re, &s->gb);
819  GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
820 
821  i += ((unsigned)code) >> 4;
822  code &= 0xf;
823  if (code) {
824  if (code > MIN_CACHE_BITS - 16)
825  UPDATE_CACHE(re, &s->gb);
826 
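 /* Branch-free JPEG EXTEND(): the next 'code' bits encode the AC level;
  * a leading 1 bit means the value is positive and equal to those bits,
  * a leading 0 bit means it is (bits - (2^code - 1)), i.e. negative. */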
827  {
828  int cache = GET_CACHE(re, &s->gb);
829  int sign = (~cache) >> 31;
830  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
831  }
832 
833  LAST_SKIP_BITS(re, &s->gb, code);
834 
835  if (i > 63) {
836  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
837  return AVERROR_INVALIDDATA;
838  }
839  j = s->scantable.permutated[i];
840  block[j] = level * quant_matrix[i];
841  }
842  } while (i < 63);
843  CLOSE_READER(re, &s->gb);}
844 
845  return 0;
846 }
847 
848 static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
849  int component, int dc_index,
850  uint16_t *quant_matrix, int Al)
851 {
852  unsigned val;
853  s->bdsp.clear_block(block);
854  val = mjpeg_decode_dc(s, dc_index);
855  if (val == 0xfffff) {
856  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
857  return AVERROR_INVALIDDATA;
858  }
859  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
860  s->last_dc[component] = val;
861  block[0] = val;
862  return 0;
863 }
864 
865 /* decode block and dequantize - progressive JPEG version */
866 static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
867  uint8_t *last_nnz, int ac_index,
868  uint16_t *quant_matrix,
869  int ss, int se, int Al, int *EOBRUN)
870 {
871  int code, i, j, val, run;
872  unsigned level;
873 
874  if (*EOBRUN) {
875  (*EOBRUN)--;
876  return 0;
877  }
878 
879  {
880  OPEN_READER(re, &s->gb);
881  for (i = ss; ; i++) {
882  UPDATE_CACHE(re, &s->gb);
883  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
884 
885  run = ((unsigned) code) >> 4;
886  code &= 0xF;
887  if (code) {
888  i += run;
889  if (code > MIN_CACHE_BITS - 16)
890  UPDATE_CACHE(re, &s->gb);
891 
892  {
893  int cache = GET_CACHE(re, &s->gb);
894  int sign = (~cache) >> 31;
895  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
896  }
897 
898  LAST_SKIP_BITS(re, &s->gb, code);
899 
900  if (i >= se) {
901  if (i == se) {
902  j = s->scantable.permutated[se];
903  block[j] = level * (quant_matrix[se] << Al);
904  break;
905  }
906  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
907  return AVERROR_INVALIDDATA;
908  }
909  j = s->scantable.permutated[i];
910  block[j] = level * (quant_matrix[i] << Al);
911  } else {
912  if (run == 0xF) {// ZRL - skip 15 coefficients
913  i += 15;
914  if (i >= se) {
915  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
916  return AVERROR_INVALIDDATA;
917  }
918  } else {
919  val = (1 << run);
920  if (run) {
921  UPDATE_CACHE(re, &s->gb);
922  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
923  LAST_SKIP_BITS(re, &s->gb, run);
924  }
925  *EOBRUN = val - 1;
926  break;
927  }
928  }
929  }
930  CLOSE_READER(re, &s->gb);
931  }
932 
933  if (i > *last_nnz)
934  *last_nnz = i;
935 
936  return 0;
937 }
938 
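/* REFINE_BIT reads one correction bit for a coefficient that is already
 * non-zero; if the bit is set, the coefficient is moved one quantization
 * step (quant_matrix[i] << Al) further away from zero, keeping its sign
 * via the branch-free (q ^ sign) - sign negation. */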
939 #define REFINE_BIT(j) { \
940  UPDATE_CACHE(re, &s->gb); \
941  sign = block[j] >> 15; \
942  block[j] += SHOW_UBITS(re, &s->gb, 1) * \
943  ((quant_matrix[i] ^ sign) - sign) << Al; \
944  LAST_SKIP_BITS(re, &s->gb, 1); \
945 }
946 
947 #define ZERO_RUN \
948 for (; ; i++) { \
949  if (i > last) { \
950  i += run; \
951  if (i > se) { \
952  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
953  return -1; \
954  } \
955  break; \
956  } \
957  j = s->scantable.permutated[i]; \
958  if (block[j]) \
959  REFINE_BIT(j) \
960  else if (run-- == 0) \
961  break; \
962 }
963 
964 /* decode block and dequantize - progressive JPEG refinement pass */
965 static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
966  uint8_t *last_nnz,
967  int ac_index, uint16_t *quant_matrix,
968  int ss, int se, int Al, int *EOBRUN)
969 {
970  int code, i = ss, j, sign, val, run;
971  int last = FFMIN(se, *last_nnz);
972 
973  OPEN_READER(re, &s->gb);
974  if (*EOBRUN) {
975  (*EOBRUN)--;
976  } else {
977  for (; ; i++) {
978  UPDATE_CACHE(re, &s->gb);
979  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
980 
981  if (code & 0xF) {
982  run = ((unsigned) code) >> 4;
983  UPDATE_CACHE(re, &s->gb);
984  val = SHOW_UBITS(re, &s->gb, 1);
985  LAST_SKIP_BITS(re, &s->gb, 1);
986  ZERO_RUN;
987  j = s->scantable.permutated[i];
988  val--;
989  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
990  if (i == se) {
991  if (i > *last_nnz)
992  *last_nnz = i;
993  CLOSE_READER(re, &s->gb);
994  return 0;
995  }
996  } else {
997  run = ((unsigned) code) >> 4;
998  if (run == 0xF) {
999  ZERO_RUN;
1000  } else {
1001  val = run;
1002  run = (1 << run);
1003  if (val) {
1004  UPDATE_CACHE(re, &s->gb);
1005  run += SHOW_UBITS(re, &s->gb, val);
1006  LAST_SKIP_BITS(re, &s->gb, val);
1007  }
1008  *EOBRUN = run - 1;
1009  break;
1010  }
1011  }
1012  }
1013 
1014  if (i > *last_nnz)
1015  *last_nnz = i;
1016  }
1017 
1018  for (; i <= last; i++) {
1019  j = s->scantable.permutated[i];
1020  if (block[j])
1021  REFINE_BIT(j)
1022  }
1023  CLOSE_READER(re, &s->gb);
1024 
1025  return 0;
1026 }
1027 #undef REFINE_BIT
1028 #undef ZERO_RUN
1029 
1030 static int handle_rstn(MJpegDecodeContext *s, int nb_components)
1031 {
1032  int i;
1033  int reset = 0;
1034 
1035  if (s->restart_interval) {
1036  s->restart_count--;
1037  if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
1038  align_get_bits(&s->gb);
1039  for (i = 0; i < nb_components; i++) /* reset dc */
1040  s->last_dc[i] = (4 << s->bits);
1041  }
1042 
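 /* Peek the bits up to the next byte boundary plus one extra byte (8..15
  * bits): if they are all ones, or form a bare 0xFF, an RST marker most
  * likely follows and is handled below. */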
1043  i = 8 + ((-get_bits_count(&s->gb)) & 7);
1044  /* skip RSTn */
1045  if (s->restart_count == 0) {
1046  if( show_bits(&s->gb, i) == (1 << i) - 1
1047  || show_bits(&s->gb, i) == 0xFF) {
1048  int pos = get_bits_count(&s->gb);
1049  align_get_bits(&s->gb);
1050  while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
1051  skip_bits(&s->gb, 8);
1052  if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
1053  for (i = 0; i < nb_components; i++) /* reset dc */
1054  s->last_dc[i] = (4 << s->bits);
1055  reset = 1;
1056  } else
1057  skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
1058  }
1059  }
1060  }
1061  return reset;
1062 }
1063 
1064 /* Handles 1 to 4 components */
1065 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
1066 {
1067  int i, mb_x, mb_y;
1068  unsigned width;
1069  uint16_t (*buffer)[4];
1070  int left[4], top[4], topleft[4];
1071  const int linesize = s->linesize[0];
1072  const int mask = ((1 << s->bits) - 1) << point_transform;
1073  int resync_mb_y = 0;
1074  int resync_mb_x = 0;
1075  int vpred[6];
1076 
1077  if (!s->bayer && s->nb_components < 3)
1078  return AVERROR_INVALIDDATA;
1079  if (s->bayer && s->nb_components > 2)
1080  return AVERROR_INVALIDDATA;
1081  if (s->nb_components <= 0 || s->nb_components > 4)
1082  return AVERROR_INVALIDDATA;
1083  if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
1084  return AVERROR_INVALIDDATA;
1085 
1086 
1087  s->restart_count = s->restart_interval;
1088 
1089  if (s->restart_interval == 0)
1090  s->restart_interval = INT_MAX;
1091 
1092  if (s->bayer)
1093  width = s->mb_width / nb_components; /* Interleaved: the stored width is the total, so divide by the component count */
1094  else
1095  width = s->mb_width;
1096 
1097  av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
1098  if (!s->ljpeg_buffer)
1099  return AVERROR(ENOMEM);
1100 
1101  buffer = s->ljpeg_buffer;
1102 
1103  for (i = 0; i < 4; i++)
1104  buffer[0][i] = 1 << (s->bits - 1);
1105 
1106  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1107  uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
1108 
1109  if (s->interlaced && s->bottom_field)
1110  ptr += linesize >> 1;
1111 
1112  for (i = 0; i < 4; i++)
1113  top[i] = left[i] = topleft[i] = buffer[0][i];
1114 
1115  if ((mb_y * s->width) % s->restart_interval == 0) {
1116  for (i = 0; i < 6; i++)
1117  vpred[i] = 1 << (s->bits-1);
1118  }
1119 
1120  for (mb_x = 0; mb_x < width; mb_x++) {
1121  int modified_predictor = predictor;
1122 
1123  if (get_bits_left(&s->gb) < 1) {
1124  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
1125  return AVERROR_INVALIDDATA;
1126  }
1127 
1128  if (s->restart_interval && !s->restart_count){
1129  s->restart_count = s->restart_interval;
1130  resync_mb_x = mb_x;
1131  resync_mb_y = mb_y;
1132  for(i=0; i<4; i++)
1133  top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1134  }
1135  if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1136  modified_predictor = 1;
1137 
1138  for (i=0;i<nb_components;i++) {
1139  int pred, dc;
1140 
1141  topleft[i] = top[i];
1142  top[i] = buffer[mb_x][i];
1143 
1144  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1145  if(dc == 0xFFFFF)
1146  return -1;
1147 
1148  if (!s->bayer || mb_x) {
1149  pred = left[i];
1150  } else { /* This path runs only for the first line in bayer images */
1151  vpred[i] += dc;
1152  pred = vpred[i] - dc;
1153  }
1154 
1155  PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
1156 
1157  left[i] = buffer[mb_x][i] =
1158  mask & (pred + (unsigned)(dc * (1 << point_transform)));
1159  }
1160 
1161  if (s->restart_interval && !--s->restart_count) {
1162  align_get_bits(&s->gb);
1163  skip_bits(&s->gb, 16); /* skip RSTn */
1164  }
1165  }
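 /* Write out the decoded row. For RCT streams the first channel is a
  * luma-like average and the next two carry (B-G) and (R-G) offsets (biased
  * by 256, hence the -0x200 term); G is reconstructed first and B/R are
  * rebuilt by adding it. The Pegasus variant uses the same scheme without
  * the bias. */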
1166  if (s->rct && s->nb_components == 4) {
1167  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1168  ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1169  ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1170  ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1171  ptr[4*mb_x + 0] = buffer[mb_x][3];
1172  }
1173  } else if (s->nb_components == 4) {
1174  for(i=0; i<nb_components; i++) {
1175  int c= s->comp_index[i];
1176  if (s->bits <= 8) {
1177  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1178  ptr[4*mb_x+3-c] = buffer[mb_x][i];
1179  }
1180  } else if(s->bits == 9) {
1181  return AVERROR_PATCHWELCOME;
1182  } else {
1183  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1184  ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1185  }
1186  }
1187  }
1188  } else if (s->rct) {
1189  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1190  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1191  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1192  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1193  }
1194  } else if (s->pegasus_rct) {
1195  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1196  ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
1197  ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1198  ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1199  }
1200  } else if (s->bayer) {
1201  if (nb_components == 1) {
1202  /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
1203  for (mb_x = 0; mb_x < width; mb_x++)
1204  ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
1205  } else if (nb_components == 2) {
1206  for (mb_x = 0; mb_x < width; mb_x++) {
1207  ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
1208  ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
1209  }
1210  }
1211  } else {
1212  for(i=0; i<nb_components; i++) {
1213  int c= s->comp_index[i];
1214  if (s->bits <= 8) {
1215  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1216  ptr[3*mb_x+2-c] = buffer[mb_x][i];
1217  }
1218  } else if(s->bits == 9) {
1219  return AVERROR_PATCHWELCOME;
1220  } else {
1221  for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1222  ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1223  }
1224  }
1225  }
1226  }
1227  }
1228  return 0;
1229 }
1230 
1231 static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
1232  int point_transform, int nb_components)
1233 {
1234  int i, mb_x, mb_y, mask;
1235  int bits= (s->bits+7)&~7;
1236  int resync_mb_y = 0;
1237  int resync_mb_x = 0;
1238 
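 /* bits is s->bits rounded up to a whole number of bytes (8 or 16); the
  * extra shift added to point_transform places the decoded samples in the
  * most significant bits of the 8- or 16-bit output samples. */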
1239  point_transform += bits - s->bits;
1240  mask = ((1 << s->bits) - 1) << point_transform;
1241 
1242  av_assert0(nb_components>=1 && nb_components<=4);
1243 
1244  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1245  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1246  if (get_bits_left(&s->gb) < 1) {
1247  av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
1248  return AVERROR_INVALIDDATA;
1249  }
1250  if (s->restart_interval && !s->restart_count){
1251  s->restart_count = s->restart_interval;
1252  resync_mb_x = mb_x;
1253  resync_mb_y = mb_y;
1254  }
1255 
1256  if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1257  int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1258  int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1259  for (i = 0; i < nb_components; i++) {
1260  uint8_t *ptr;
1261  uint16_t *ptr16;
1262  int n, h, v, x, y, c, j, linesize;
1263  n = s->nb_blocks[i];
1264  c = s->comp_index[i];
1265  h = s->h_scount[i];
1266  v = s->v_scount[i];
1267  x = 0;
1268  y = 0;
1269  linesize= s->linesize[c];
1270 
1271  if(bits>8) linesize /= 2;
1272 
1273  for(j=0; j<n; j++) {
1274  int pred, dc;
1275 
1276  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1277  if(dc == 0xFFFFF)
1278  return -1;
1279  if ( h * mb_x + x >= s->width
1280  || v * mb_y + y >= s->height) {
1281  // Nothing to do
1282  } else if (bits<=8) {
1283  ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
1284  if(y==0 && toprow){
1285  if(x==0 && leftcol){
1286  pred= 1 << (bits - 1);
1287  }else{
1288  pred= ptr[-1];
1289  }
1290  }else{
1291  if(x==0 && leftcol){
1292  pred= ptr[-linesize];
1293  }else{
1294  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1295  }
1296  }
1297 
1298  if (s->interlaced && s->bottom_field)
1299  ptr += linesize >> 1;
1300  pred &= mask;
1301  *ptr= pred + ((unsigned)dc << point_transform);
1302  }else{
1303  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1304  if(y==0 && toprow){
1305  if(x==0 && leftcol){
1306  pred= 1 << (bits - 1);
1307  }else{
1308  pred= ptr16[-1];
1309  }
1310  }else{
1311  if(x==0 && leftcol){
1312  pred= ptr16[-linesize];
1313  }else{
1314  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1315  }
1316  }
1317 
1318  if (s->interlaced && s->bottom_field)
1319  ptr16 += linesize >> 1;
1320  pred &= mask;
1321  *ptr16= pred + ((unsigned)dc << point_transform);
1322  }
1323  if (++x == h) {
1324  x = 0;
1325  y++;
1326  }
1327  }
1328  }
1329  } else {
1330  for (i = 0; i < nb_components; i++) {
1331  uint8_t *ptr;
1332  uint16_t *ptr16;
1333  int n, h, v, x, y, c, j, linesize, dc;
1334  n = s->nb_blocks[i];
1335  c = s->comp_index[i];
1336  h = s->h_scount[i];
1337  v = s->v_scount[i];
1338  x = 0;
1339  y = 0;
1340  linesize = s->linesize[c];
1341 
1342  if(bits>8) linesize /= 2;
1343 
1344  for (j = 0; j < n; j++) {
1345  int pred;
1346 
1347  dc = mjpeg_decode_dc(s, s->dc_index[i]);
1348  if(dc == 0xFFFFF)
1349  return -1;
1350  if ( h * mb_x + x >= s->width
1351  || v * mb_y + y >= s->height) {
1352  // Nothing to do
1353  } else if (bits<=8) {
1354  ptr = s->picture_ptr->data[c] +
1355  (linesize * (v * mb_y + y)) +
1356  (h * mb_x + x); //FIXME optimize this crap
1357  PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1358 
1359  pred &= mask;
1360  *ptr = pred + ((unsigned)dc << point_transform);
1361  }else{
1362  ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
1363  PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1364 
1365  pred &= mask;
1366  *ptr16= pred + ((unsigned)dc << point_transform);
1367  }
1368 
1369  if (++x == h) {
1370  x = 0;
1371  y++;
1372  }
1373  }
1374  }
1375  }
1376  if (s->restart_interval && !--s->restart_count) {
1377  align_get_bits(&s->gb);
1378  skip_bits(&s->gb, 16); /* skip RSTn */
1379  }
1380  }
1381  }
1382  return 0;
1383 }
1384 
1385 static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
1386  uint8_t *dst, const uint8_t *src,
1387  int linesize, int lowres)
1388 {
1389  switch (lowres) {
1390  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1391  break;
1392  case 1: copy_block4(dst, src, linesize, linesize, 4);
1393  break;
1394  case 2: copy_block2(dst, src, linesize, linesize, 2);
1395  break;
1396  case 3: *dst = *src;
1397  break;
1398  }
1399 }
1400 
1401 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1402 {
1403  int block_x, block_y;
1404  int size = 8 >> s->avctx->lowres;
1405  if (s->bits > 8) {
1406  for (block_y=0; block_y<size; block_y++)
1407  for (block_x=0; block_x<size; block_x++)
1408  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1409  } else {
1410  for (block_y=0; block_y<size; block_y++)
1411  for (block_x=0; block_x<size; block_x++)
1412  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1413  }
1414 }
1415 
1416 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1417  int Al, const uint8_t *mb_bitmask,
1418  int mb_bitmask_size,
1419  const AVFrame *reference)
1420 {
1421  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1422  uint8_t *data[MAX_COMPONENTS];
1423  const uint8_t *reference_data[MAX_COMPONENTS];
1424  int linesize[MAX_COMPONENTS];
1425  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1426  int bytes_per_pixel = 1 + (s->bits > 8);
1427 
1428  if (mb_bitmask) {
1429  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1430  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1431  return AVERROR_INVALIDDATA;
1432  }
1433  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1434  }
1435 
1436  s->restart_count = 0;
1437 
1438  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1439  &chroma_v_shift);
1440  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1441  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1442 
1443  for (i = 0; i < nb_components; i++) {
1444  int c = s->comp_index[i];
1445  data[c] = s->picture_ptr->data[c];
1446  reference_data[c] = reference ? reference->data[c] : NULL;
1447  linesize[c] = s->linesize[c];
1448  s->coefs_finished[c] |= 1;
1449  }
1450 
1451  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1452  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1453  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1454 
1455  if (s->restart_interval && !s->restart_count)
1456  s->restart_count = s->restart_interval;
1457 
1458  if (get_bits_left(&s->gb) < 0) {
1459  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1460  -get_bits_left(&s->gb));
1461  return AVERROR_INVALIDDATA;
1462  }
1463  for (i = 0; i < nb_components; i++) {
1464  uint8_t *ptr;
1465  int n, h, v, x, y, c, j;
1466  int block_offset;
1467  n = s->nb_blocks[i];
1468  c = s->comp_index[i];
1469  h = s->h_scount[i];
1470  v = s->v_scount[i];
1471  x = 0;
1472  y = 0;
1473  for (j = 0; j < n; j++) {
1474  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1475  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1476 
1477  if (s->interlaced && s->bottom_field)
1478  block_offset += linesize[c] >> 1;
1479  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1480  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1481  ptr = data[c] + block_offset;
1482  } else
1483  ptr = NULL;
1484  if (!s->progressive) {
1485  if (copy_mb) {
1486  if (ptr)
1487  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1488  linesize[c], s->avctx->lowres);
1489 
1490  } else {
1491  s->bdsp.clear_block(s->block);
1492  if (decode_block(s, s->block, i,
1493  s->dc_index[i], s->ac_index[i],
1494  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1495  av_log(s->avctx, AV_LOG_ERROR,
1496  "error y=%d x=%d\n", mb_y, mb_x);
1497  return AVERROR_INVALIDDATA;
1498  }
1499  if (ptr) {
1500  s->idsp.idct_put(ptr, linesize[c], s->block);
1501  if (s->bits & 7)
1502  shift_output(s, ptr, linesize[c]);
1503  }
1504  }
1505  } else {
1506  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1507  (h * mb_x + x);
1508  int16_t *block = s->blocks[c][block_idx];
1509  if (Ah)
1510  block[0] += get_bits1(&s->gb) *
1511  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1512  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1513  s->quant_matrixes[s->quant_sindex[i]],
1514  Al) < 0) {
1515  av_log(s->avctx, AV_LOG_ERROR,
1516  "error y=%d x=%d\n", mb_y, mb_x);
1517  return AVERROR_INVALIDDATA;
1518  }
1519  }
1520  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1521  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1522  mb_x, mb_y, x, y, c, s->bottom_field,
1523  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1524  if (++x == h) {
1525  x = 0;
1526  y++;
1527  }
1528  }
1529  }
1530 
1531  handle_rstn(s, nb_components);
1532  }
1533  }
1534  return 0;
1535 }
1536 
1537 static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
1538  int se, int Ah, int Al)
1539 {
1540  int mb_x, mb_y;
1541  int EOBRUN = 0;
1542  int c = s->comp_index[0];
1543  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1544 
1545  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1546  if (se < ss || se > 63) {
1547  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1548  return AVERROR_INVALIDDATA;
1549  }
1550 
1551  // s->coefs_finished is a bitmask of the coefficients that have been coded;
1552  // ss and se give the first and last coefficient index covered by this scan
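 // (2ULL << se) - (1ULL << ss) sets bits ss..se, e.g. ss=1, se=5:
 // (2ULL << 5) - (1ULL << 1) = 64 - 2 = 0b111110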
1553  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1554 
1555  s->restart_count = 0;
1556 
1557  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1558  int block_idx = mb_y * s->block_stride[c];
1559  int16_t (*block)[64] = &s->blocks[c][block_idx];
1560  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1561  if (get_bits_left(&s->gb) <= 0) {
1562  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1563  return AVERROR_INVALIDDATA;
1564  }
1565  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1566  int ret;
1567  if (s->restart_interval && !s->restart_count)
1568  s->restart_count = s->restart_interval;
1569 
1570  if (Ah)
1571  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1572  quant_matrix, ss, se, Al, &EOBRUN);
1573  else
1574  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1575  quant_matrix, ss, se, Al, &EOBRUN);
1576 
1577  if (ret >= 0 && get_bits_left(&s->gb) < 0)
1578  ret = AVERROR_INVALIDDATA;
1579  if (ret < 0) {
1580  av_log(s->avctx, AV_LOG_ERROR,
1581  "error y=%d x=%d\n", mb_y, mb_x);
1582  return AVERROR_INVALIDDATA;
1583  }
1584 
1585  if (handle_rstn(s, 0))
1586  EOBRUN = 0;
1587  }
1588  }
1589  return 0;
1590 }
1591 
1592 static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
1593 {
1594  int mb_x, mb_y;
1595  int c;
1596  const int bytes_per_pixel = 1 + (s->bits > 8);
1597  const int block_size = s->lossless ? 1 : 8;
1598 
1599  for (c = 0; c < s->nb_components; c++) {
1600  uint8_t *data = s->picture_ptr->data[c];
1601  int linesize = s->linesize[c];
1602  int h = s->h_max / s->h_count[c];
1603  int v = s->v_max / s->v_count[c];
1604  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1605  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1606 
1607  if (~s->coefs_finished[c])
1608  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1609 
1610  if (s->interlaced && s->bottom_field)
1611  data += linesize >> 1;
1612 
1613  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1614  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1615  int block_idx = mb_y * s->block_stride[c];
1616  int16_t (*block)[64] = &s->blocks[c][block_idx];
1617  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1618  s->idsp.idct_put(ptr, linesize, *block);
1619  if (s->bits & 7)
1620  shift_output(s, ptr, linesize);
1621  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1622  }
1623  }
1624  }
1625 }
1626 
1627 int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
1628  int mb_bitmask_size, const AVFrame *reference)
1629 {
1630  int len, nb_components, i, h, v, predictor, point_transform;
1631  int index, id, ret;
1632  const int block_size = s->lossless ? 1 : 8;
1633  int ilv, prev_shift;
1634 
1635  if (!s->got_picture) {
1636  av_log(s->avctx, AV_LOG_WARNING,
1637  "Can not process SOS before SOF, skipping\n");
1638  return -1;
1639  }
1640 
1641  if (reference) {
1642  if (reference->width != s->picture_ptr->width ||
1643  reference->height != s->picture_ptr->height ||
1644  reference->format != s->picture_ptr->format) {
1645  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1646  return AVERROR_INVALIDDATA;
1647  }
1648  }
1649 
1650  /* XXX: verify len field validity */
1651  len = get_bits(&s->gb, 16);
1652  nb_components = get_bits(&s->gb, 8);
1653  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1654  avpriv_report_missing_feature(s->avctx,
1655  "decode_sos: nb_components (%d)",
1656  nb_components);
1657  return AVERROR_PATCHWELCOME;
1658  }
1659  if (len != 6 + 2 * nb_components) {
1660  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1661  return AVERROR_INVALIDDATA;
1662  }
1663  for (i = 0; i < nb_components; i++) {
1664  id = get_bits(&s->gb, 8) - 1;
1665  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1666  /* find component index */
1667  for (index = 0; index < s->nb_components; index++)
1668  if (id == s->component_id[index])
1669  break;
1670  if (index == s->nb_components) {
1671  av_log(s->avctx, AV_LOG_ERROR,
1672  "decode_sos: index(%d) out of components\n", index);
1673  return AVERROR_INVALIDDATA;
1674  }
1675  /* Metasoft MJPEG codec has Cb and Cr swapped */
1676  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1677  && nb_components == 3 && s->nb_components == 3 && i)
1678  index = 3 - i;
1679 
1680  s->quant_sindex[i] = s->quant_index[index];
1681  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1682  s->h_scount[i] = s->h_count[index];
1683  s->v_scount[i] = s->v_count[index];
1684 
1685  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1686  index = (index+2)%3;
1687 
1688  s->comp_index[i] = index;
1689 
1690  s->dc_index[i] = get_bits(&s->gb, 4);
1691  s->ac_index[i] = get_bits(&s->gb, 4);
1692 
1693  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1694  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1695  goto out_of_range;
1696  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1697  goto out_of_range;
1698  }
1699 
1700  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1701  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1702  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1703  prev_shift = get_bits(&s->gb, 4); /* Ah */
1704  point_transform = get_bits(&s->gb, 4); /* Al */
1705  }else
1706  prev_shift = point_transform = 0;
1707 
1708  if (nb_components > 1) {
1709  /* interleaved stream */
1710  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1711  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1712  } else if (!s->ls) { /* skip this for JPEG-LS */
1713  h = s->h_max / s->h_scount[0];
1714  v = s->v_max / s->v_scount[0];
1715  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1716  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1717  s->nb_blocks[0] = 1;
1718  s->h_scount[0] = 1;
1719  s->v_scount[0] = 1;
1720  }
1721 
1722  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1723  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1724  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1725  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1726  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1727 
1728 
1729  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1730  for (i = s->mjpb_skiptosod; i > 0; i--)
1731  skip_bits(&s->gb, 8);
1732 
1733 next_field:
1734  for (i = 0; i < nb_components; i++)
1735  s->last_dc[i] = (4 << s->bits);
1736 
1737  if (s->avctx->hwaccel) {
1738  int bytes_to_start = get_bits_count(&s->gb) / 8;
1739  av_assert0(bytes_to_start >= 0 &&
1740  s->raw_scan_buffer_size >= bytes_to_start);
1741 
1742  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1743  s->raw_scan_buffer + bytes_to_start,
1744  s->raw_scan_buffer_size - bytes_to_start);
1745  if (ret < 0)
1746  return ret;
1747 
1748  } else if (s->lossless) {
1749  av_assert0(s->picture_ptr == s->picture);
1750  if (CONFIG_JPEGLS_DECODER && s->ls) {
1751 // for () {
1752 // reset_ls_coding_parameters(s, 0);
1753 
1754  if ((ret = ff_jpegls_decode_picture(s, predictor,
1755  point_transform, ilv)) < 0)
1756  return ret;
1757  } else {
1758  if (s->rgb || s->bayer) {
1759  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1760  return ret;
1761  } else {
1762  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1763  point_transform,
1764  nb_components)) < 0)
1765  return ret;
1766  }
1767  }
1768  } else {
1769  if (s->progressive && predictor) {
1770  av_assert0(s->picture_ptr == s->picture);
1771  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1772  ilv, prev_shift,
1773  point_transform)) < 0)
1774  return ret;
1775  } else {
1776  if ((ret = mjpeg_decode_scan(s, nb_components,
1777  prev_shift, point_transform,
1778  mb_bitmask, mb_bitmask_size, reference)) < 0)
1779  return ret;
1780  }
1781  }
1782 
1783  if (s->interlaced &&
1784  get_bits_left(&s->gb) > 32 &&
1785  show_bits(&s->gb, 8) == 0xFF) {
1786  GetBitContext bak = s->gb;
1787  align_get_bits(&bak);
1788  if (show_bits(&bak, 16) == 0xFFD1) {
1789  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1790  s->gb = bak;
1791  skip_bits(&s->gb, 16);
1792  s->bottom_field ^= 1;
1793 
1794  goto next_field;
1795  }
1796  }
1797 
1798  emms_c();
1799  return 0;
1800  out_of_range:
1801  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1802  return AVERROR_INVALIDDATA;
1803 }
1804 
1805 int ff_mjpeg_decode_dri(MJpegDecodeContext *s)
1806 {
1807  if (get_bits(&s->gb, 16) != 4)
1808  return AVERROR_INVALIDDATA;
1809  s->restart_interval = get_bits(&s->gb, 16);
1810  s->restart_count = 0;
1811  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1812  s->restart_interval);
1813 
1814  return 0;
1815 }
1816 
1817 static int mjpeg_decode_app(MJpegDecodeContext *s)
1818 {
1819  int len, id, i;
1820 
1821  len = get_bits(&s->gb, 16);
1822  if (len < 6) {
1823  if (s->bayer) {
1824  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1825  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1826  skip_bits(&s->gb, len);
1827  return 0;
1828  } else
1829  return AVERROR_INVALIDDATA;
1830  }
1831  if (8 * len > get_bits_left(&s->gb))
1832  return AVERROR_INVALIDDATA;
1833 
1834  id = get_bits_long(&s->gb, 32);
1835  len -= 6;
1836 
1837  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1838  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1839  av_fourcc2str(av_bswap32(id)), id, len);
1840 
1841  /* Buggy AVID, it puts EOI only at every 10th frame. */
1842  /* Also, this fourcc is used by non-avid files too, it holds some
1843  information, but it's always present in AVID-created files. */
1844  if (id == AV_RB32("AVI1")) {
1845  /* structure:
1846  4bytes AVI1
1847  1bytes polarity
1848  1bytes always zero
1849  4bytes field_size
1850  4bytes field_size_less_padding
1851  */
1852  s->buggy_avid = 1;
1853  i = get_bits(&s->gb, 8); len--;
1854  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1855  goto out;
1856  }
1857 
1858  if (id == AV_RB32("JFIF")) {
1859  int t_w, t_h, v1, v2;
1860  if (len < 8)
1861  goto out;
1862  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1863  v1 = get_bits(&s->gb, 8);
1864  v2 = get_bits(&s->gb, 8);
1865  skip_bits(&s->gb, 8);
1866 
1867  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1868  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1869  if ( s->avctx->sample_aspect_ratio.num <= 0
1870  || s->avctx->sample_aspect_ratio.den <= 0) {
1871  s->avctx->sample_aspect_ratio.num = 0;
1872  s->avctx->sample_aspect_ratio.den = 1;
1873  }
1874 
1875  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1876  av_log(s->avctx, AV_LOG_INFO,
1877  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1878  v1, v2,
1879  s->avctx->sample_aspect_ratio.num,
1880  s->avctx->sample_aspect_ratio.den);
1881 
1882  len -= 8;
1883  if (len >= 2) {
1884  t_w = get_bits(&s->gb, 8);
1885  t_h = get_bits(&s->gb, 8);
1886  if (t_w && t_h) {
1887  /* skip thumbnail */
1888  if (len -10 - (t_w * t_h * 3) > 0)
1889  len -= t_w * t_h * 3;
1890  }
1891  len -= 2;
1892  }
1893  goto out;
1894  }
1895 
1896  if ( id == AV_RB32("Adob")
1897  && len >= 7
1898  && show_bits(&s->gb, 8) == 'e'
1899  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1900  skip_bits(&s->gb, 8); /* 'e' */
1901  skip_bits(&s->gb, 16); /* version */
1902  skip_bits(&s->gb, 16); /* flags0 */
1903  skip_bits(&s->gb, 16); /* flags1 */
1904  s->adobe_transform = get_bits(&s->gb, 8);
1905  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1906  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1907  len -= 7;
1908  goto out;
1909  }
1910 
1911  if (id == AV_RB32("LJIF")) {
1912  int rgb = s->rgb;
1913  int pegasus_rct = s->pegasus_rct;
1914  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1915  av_log(s->avctx, AV_LOG_INFO,
1916  "Pegasus lossless jpeg header found\n");
1917  skip_bits(&s->gb, 16); /* version ? */
1918  skip_bits(&s->gb, 16); /* unknown always 0? */
1919  skip_bits(&s->gb, 16); /* unknown always 0? */
1920  skip_bits(&s->gb, 16); /* unknown always 0? */
1921  switch (i=get_bits(&s->gb, 8)) {
1922  case 1:
1923  rgb = 1;
1924  pegasus_rct = 0;
1925  break;
1926  case 2:
1927  rgb = 1;
1928  pegasus_rct = 1;
1929  break;
1930  default:
1931  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1932  }
1933 
1934  len -= 9;
1935  if (s->got_picture)
1936  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1937  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1938  goto out;
1939  }
1940 
1941  s->rgb = rgb;
1942  s->pegasus_rct = pegasus_rct;
1943 
1944  goto out;
1945  }
1946  if (id == AV_RL32("colr") && len > 0) {
1947  s->colr = get_bits(&s->gb, 8);
1948  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1949  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1950  len --;
1951  goto out;
1952  }
1953  if (id == AV_RL32("xfrm") && len > 0) {
1954  s->xfrm = get_bits(&s->gb, 8);
1955  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1956  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1957  len --;
1958  goto out;
1959  }
1960 
1961  /* JPS extension by VRex */
1962  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1963  int flags, layout, type;
1964  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1965  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1966 
1967  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1968  skip_bits(&s->gb, 16); len -= 2; /* block length */
1969  skip_bits(&s->gb, 8); /* reserved */
1970  flags = get_bits(&s->gb, 8);
1971  layout = get_bits(&s->gb, 8);
1972  type = get_bits(&s->gb, 8);
1973  len -= 4;
1974 
1975  av_freep(&s->stereo3d);
1976  s->stereo3d = av_stereo3d_alloc();
1977  if (!s->stereo3d) {
1978  goto out;
1979  }
1980  if (type == 0) {
1981  s->stereo3d->type = AV_STEREO3D_2D;
1982  } else if (type == 1) {
1983  switch (layout) {
1984  case 0x01:
1985  s->stereo3d->type = AV_STEREO3D_LINES;
1986  break;
1987  case 0x02:
1988  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1989  break;
1990  case 0x03:
1991  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1992  break;
1993  }
1994  if (!(flags & 0x04)) {
1995  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1996  }
1997  }
1998  goto out;
1999  }
2000 
2001  /* EXIF metadata */
2002  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
2003  GetByteContext gbytes;
2004  int ret, le, ifd_offset, bytes_read;
2005  const uint8_t *aligned;
2006 
2007  skip_bits(&s->gb, 16); // skip padding
2008  len -= 2;
2009 
2010  // init byte wise reading
2011  aligned = align_get_bits(&s->gb);
2012  bytestream2_init(&gbytes, aligned, len);
2013 
2014  // read TIFF header
2015  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2016  if (ret) {
2017  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2018  } else {
2019  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2020 
2021  // read 0th IFD and store the metadata
2022  // (return values > 0 indicate the presence of subimage metadata)
2023  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2024  if (ret < 0) {
2025  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2026  }
2027  }
2028 
2029  bytes_read = bytestream2_tell(&gbytes);
2030  skip_bits(&s->gb, bytes_read << 3);
2031  len -= bytes_read;
2032 
2033  goto out;
2034  }
2035 
2036  /* Apple MJPEG-A */
2037  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2038  id = get_bits_long(&s->gb, 32);
2039  len -= 4;
2040  /* Apple MJPEG-A */
2041  if (id == AV_RB32("mjpg")) {
2042  /* structure:
2043  4bytes field size
2044  4bytes pad field size
2045  4bytes next off
2046  4bytes quant off
2047  4bytes huff off
2048  4bytes image off
2049  4bytes scan off
2050  4bytes data off
2051  */
2052  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2053  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2054  }
2055  }
2056 
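 /* ICC profiles that do not fit into a single marker are split across several
  * "ICC_PROFILE" APP2 segments, each carrying its sequence number and the total
  * marker count; the pieces are collected here and reassembled into frame side
  * data once the whole picture has been decoded. */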
2057  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2058  int id2;
2059  unsigned seqno;
2060  unsigned nummarkers;
2061 
2062  id = get_bits_long(&s->gb, 32);
2063  id2 = get_bits(&s->gb, 24);
2064  len -= 7;
2065  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2066  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2067  goto out;
2068  }
2069 
2070  skip_bits(&s->gb, 8);
2071  seqno = get_bits(&s->gb, 8);
2072  len -= 2;
2073  if (seqno == 0) {
2074  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2075  goto out;
2076  }
2077 
2078  nummarkers = get_bits(&s->gb, 8);
2079  len -= 1;
2080  if (nummarkers == 0) {
2081  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2082  goto out;
2083  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2084  av_log(s->avctx, AV_LOG_WARNING, "Mismatch in coded number of ICC markers between markers\n");
2085  goto out;
2086  } else if (seqno > nummarkers) {
2087  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2088  goto out;
2089  }
2090 
2091  /* Allocate if this is the first APP2 we've seen. */
2092  if (s->iccnum == 0) {
2093  if (!FF_ALLOCZ_TYPED_ARRAY(s->iccentries, nummarkers)) {
2094  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2095  return AVERROR(ENOMEM);
2096  }
2097  s->iccnum = nummarkers;
2098  }
2099 
2100  if (s->iccentries[seqno - 1].data) {
2101  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2102  goto out;
2103  }
2104 
2105  s->iccentries[seqno - 1].length = len;
2106  s->iccentries[seqno - 1].data = av_malloc(len);
2107  if (!s->iccentries[seqno - 1].data) {
2108  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2109  return AVERROR(ENOMEM);
2110  }
2111 
2112  memcpy(s->iccentries[seqno - 1].data, align_get_bits(&s->gb), len);
2113  skip_bits(&s->gb, len << 3);
2114  len = 0;
2115  s->iccread++;
2116 
2117  if (s->iccread > s->iccnum)
2118  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2119  }
2120 
2121 out:
2122  /* slow but needed for extreme adobe jpegs */
2123  if (len < 0)
2124  av_log(s->avctx, AV_LOG_ERROR,
2125  "mjpeg: error, decode_app parser read over the end\n");
2126  while (--len > 0)
2127  skip_bits(&s->gb, 8);
2128 
2129  return 0;
2130 }
2131 
2132 static int mjpeg_decode_com(MJpegDecodeContext *s)
2133 {
2134  int len = get_bits(&s->gb, 16);
2135  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2136  int i;
2137  char *cbuf = av_malloc(len - 1);
2138  if (!cbuf)
2139  return AVERROR(ENOMEM);
2140 
2141  for (i = 0; i < len - 2; i++)
2142  cbuf[i] = get_bits(&s->gb, 8);
2143  if (i > 0 && cbuf[i - 1] == '\n')
2144  cbuf[i - 1] = 0;
2145  else
2146  cbuf[i] = 0;
2147 
2148  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2149  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2150 
2151  /* buggy avid, it puts EOI only at every 10th frame */
2152  if (!strncmp(cbuf, "AVID", 4)) {
2153  parse_avid(s, cbuf, len);
2154  } else if (!strcmp(cbuf, "CS=ITU601"))
2155  s->cs_itu601 = 1;
2156  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2157  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2158  s->flipped = 1;
2159  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2160  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2161  s->multiscope = 2;
2162  }
2163 
2164  av_free(cbuf);
2165  }
2166 
2167  return 0;
2168 }
2169 
2170 /* return the 8 bit start code value and update the search
2171  state. Return -1 if no start code found */
2172 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2173 {
2174  const uint8_t *buf_ptr;
2175  unsigned int v, v2;
2176  int val;
2177  int skipped = 0;
2178 
2179  buf_ptr = *pbuf_ptr;
2180  while (buf_end - buf_ptr > 1) {
2181  v = *buf_ptr++;
2182  v2 = *buf_ptr;
2183  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2184  val = *buf_ptr++;
2185  goto found;
2186  }
2187  skipped++;
2188  }
2189  buf_ptr = buf_end;
2190  val = -1;
2191 found:
2192  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2193  *pbuf_ptr = buf_ptr;
2194  return val;
2195 }
2196 
2197 int ff_mjpeg_find_marker(MJpegDecodeContext *s,
2198  const uint8_t **buf_ptr, const uint8_t *buf_end,
2199  const uint8_t **unescaped_buf_ptr,
2200  int *unescaped_buf_size)
2201 {
2202  int start_code;
2203  start_code = find_marker(buf_ptr, buf_end);
2204 
2205  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2206  if (!s->buffer)
2207  return AVERROR(ENOMEM);
2208 
2209  /* unescape buffer of SOS, use special treatment for JPEG-LS */
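 /* In the entropy-coded segment every 0xFF data byte is followed by a stuffed
  * 0x00; the loop below drops the stuffing (and runs of 0xFF fill bytes), keeps
  * RST0..RST7 markers in place, and stops copying at any other marker. */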
2210  if (start_code == SOS && !s->ls) {
2211  const uint8_t *src = *buf_ptr;
2212  const uint8_t *ptr = src;
2213  uint8_t *dst = s->buffer;
2214 
2215  #define copy_data_segment(skip) do { \
2216  ptrdiff_t length = (ptr - src) - (skip); \
2217  if (length > 0) { \
2218  memcpy(dst, src, length); \
2219  dst += length; \
2220  src = ptr; \
2221  } \
2222  } while (0)
2223 
2224  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2225  ptr = buf_end;
2226  copy_data_segment(0);
2227  } else {
2228  while (ptr < buf_end) {
2229  uint8_t x = *(ptr++);
2230 
2231  if (x == 0xff) {
2232  ptrdiff_t skip = 0;
2233  while (ptr < buf_end && x == 0xff) {
2234  x = *(ptr++);
2235  skip++;
2236  }
2237 
2238  /* 0xFF, 0xFF, ... */
2239  if (skip > 1) {
2240  copy_data_segment(skip);
2241 
2242  /* decrement src as it is equal to ptr after the
2243  * copy_data_segment macro and we might want to
2244  * copy the current value of x later on */
2245  src--;
2246  }
2247 
2248  if (x < RST0 || x > RST7) {
2249  copy_data_segment(1);
2250  if (x)
2251  break;
2252  }
2253  }
2254  }
2255  if (src < ptr)
2256  copy_data_segment(0);
2257  }
2258  #undef copy_data_segment
2259 
2260  *unescaped_buf_ptr = s->buffer;
2261  *unescaped_buf_size = dst - s->buffer;
2262  memset(s->buffer + *unescaped_buf_size, 0,
2263  AV_INPUT_BUFFER_PADDING_SIZE);
2264 
2265  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2266  (buf_end - *buf_ptr) - (dst - s->buffer));
2267  } else if (start_code == SOS && s->ls) {
2268  const uint8_t *src = *buf_ptr;
2269  uint8_t *dst = s->buffer;
2270  int bit_count = 0;
2271  int t = 0, b = 0;
2272  PutBitContext pb;
2273 
2274  /* find marker */
2275  while (src + t < buf_end) {
2276  uint8_t x = src[t++];
2277  if (x == 0xff) {
2278  while ((src + t < buf_end) && x == 0xff)
2279  x = src[t++];
2280  if (x & 0x80) {
2281  t -= FFMIN(2, t);
2282  break;
2283  }
2284  }
2285  }
2286  bit_count = t * 8;
2287  init_put_bits(&pb, dst, t);
2288 
2289  /* unescape bitstream */
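 /* JPEG-LS marker escaping: after a 0xFF byte the encoder inserts a zero bit,
  * so the following byte's MSB must be 0; while unescaping, only 7 bits of that
  * byte are copied and the total bit count shrinks by one per escape. */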
2290  while (b < t) {
2291  uint8_t x = src[b++];
2292  put_bits(&pb, 8, x);
2293  if (x == 0xFF && b < t) {
2294  x = src[b++];
2295  if (x & 0x80) {
2296  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2297  x &= 0x7f;
2298  }
2299  put_bits(&pb, 7, x);
2300  bit_count--;
2301  }
2302  }
2303  flush_put_bits(&pb);
2304 
2305  *unescaped_buf_ptr = dst;
2306  *unescaped_buf_size = (bit_count + 7) >> 3;
2307  memset(s->buffer + *unescaped_buf_size, 0,
2308  AV_INPUT_BUFFER_PADDING_SIZE);
2309  } else {
2310  *unescaped_buf_ptr = *buf_ptr;
2311  *unescaped_buf_size = buf_end - *buf_ptr;
2312  }
2313 
2314  return start_code;
2315 }
2316 
2317 static void reset_icc_profile(MJpegDecodeContext *s)
2318 {
2319  int i;
2320 
2321  if (s->iccentries) {
2322  for (i = 0; i < s->iccnum; i++)
2323  av_freep(&s->iccentries[i].data);
2324  av_freep(&s->iccentries);
2325  }
2326 
2327  s->iccread = 0;
2328  s->iccnum = 0;
2329 }
2330 
2331 // SMV JPEG just stacks several output frames into one JPEG picture
2332 // we handle that by setting up the cropping parameters appropriately
2333 static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
2334 {
2335  MJpegDecodeContext *s = avctx->priv_data;
2336  int ret;
2337 
2338  if (s->smv_next_frame > 0) {
2339  av_assert0(s->smv_frame->buf[0]);
2340  av_frame_unref(frame);
2341  ret = av_frame_ref(frame, s->smv_frame);
2342  if (ret < 0)
2343  return ret;
2344  } else {
2345  av_assert0(frame->buf[0]);
2346  av_frame_unref(s->smv_frame);
2347  ret = av_frame_ref(s->smv_frame, frame);
2348  if (ret < 0)
2349  return ret;
2350  }
2351 
2352  av_assert0((s->smv_next_frame + 1) * avctx->height <= avctx->coded_height);
2353 
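 /* Expose the current sub-picture: keep the full coded frame and crop away
  * everything above and below the smv_next_frame-th slice of avctx->height rows. */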
2354  frame->width = avctx->coded_width;
2355  frame->height = avctx->coded_height;
2356  frame->crop_top = FFMIN(s->smv_next_frame * avctx->height, frame->height);
2357  frame->crop_bottom = frame->height - (s->smv_next_frame + 1) * avctx->height;
2358 
2359  s->smv_next_frame = (s->smv_next_frame + 1) % s->smv_frames_per_jpeg;
2360 
2361  if (s->smv_next_frame == 0)
2362  av_frame_unref(s->smv_frame);
2363 
2364  return 0;
2365 }
2366 
2367 static int mjpeg_get_packet(AVCodecContext *avctx)
2368 {
2369  MJpegDecodeContext *s = avctx->priv_data;
2370  int ret;
2371 
2372  av_packet_unref(s->pkt);
2373  ret = ff_decode_get_packet(avctx, s->pkt);
2374  if (ret < 0)
2375  return ret;
2376 
2377 #if CONFIG_SP5X_DECODER || CONFIG_AMV_DECODER
2378  if (avctx->codec_id == AV_CODEC_ID_SP5X ||
2379  avctx->codec_id == AV_CODEC_ID_AMV) {
2380  ret = ff_sp5x_process_packet(avctx, s->pkt);
2381  if (ret < 0)
2382  return ret;
2383  }
2384 #endif
2385 
2386  s->buf_size = s->pkt->size;
2387 
2388  return 0;
2389 }
2390 
2391 int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2392 {
2393  MJpegDecodeContext *s = avctx->priv_data;
2394  const uint8_t *buf_end, *buf_ptr;
2395  const uint8_t *unescaped_buf_ptr;
2396  int hshift, vshift;
2397  int unescaped_buf_size;
2398  int start_code;
2399  int i, index;
2400  int ret = 0;
2401  int is16bit;
2402 
2403  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG && s->smv_next_frame > 0)
2404  return smv_process_frame(avctx, frame);
2405 
2406  av_dict_free(&s->exif_metadata);
2407  av_freep(&s->stereo3d);
2408  s->adobe_transform = -1;
2409 
2410  if (s->iccnum != 0)
2411  reset_icc_profile(s);
2412 
2413  ret = mjpeg_get_packet(avctx);
2414  if (ret < 0)
2415  return ret;
2416 
2417  buf_ptr = s->pkt->data;
2418  buf_end = s->pkt->data + s->pkt->size;
2419  while (buf_ptr < buf_end) {
2420  /* find start next marker */
2421  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2422  &unescaped_buf_ptr,
2423  &unescaped_buf_size);
2424  /* EOF */
2425  if (start_code < 0) {
2426  break;
2427  } else if (unescaped_buf_size > INT_MAX / 8) {
2428  av_log(avctx, AV_LOG_ERROR,
2429  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2430  start_code, unescaped_buf_size, s->pkt->size);
2431  return AVERROR_INVALIDDATA;
2432  }
2433  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2434  start_code, buf_end - buf_ptr);
2435 
2436  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2437 
2438  if (ret < 0) {
2439  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2440  goto fail;
2441  }
2442 
2443  s->start_code = start_code;
2444  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2445  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2446 
2447  /* process markers */
2448  if (start_code >= RST0 && start_code <= RST7) {
2449  av_log(avctx, AV_LOG_DEBUG,
2450  "restart marker: %d\n", start_code & 0x0f);
2451  /* APP fields */
2452  } else if (start_code >= APP0 && start_code <= APP15) {
2453  if ((ret = mjpeg_decode_app(s)) < 0)
2454  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2455  av_err2str(ret));
2456  /* Comment */
2457  } else if (start_code == COM) {
2458  ret = mjpeg_decode_com(s);
2459  if (ret < 0)
2460  return ret;
2461  } else if (start_code == DQT) {
2462  ret = ff_mjpeg_decode_dqt(s);
2463  if (ret < 0)
2464  return ret;
2465  }
2466 
2467  ret = -1;
2468 
2469  if (!CONFIG_JPEGLS_DECODER &&
2470  (start_code == SOF48 || start_code == LSE)) {
2471  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2472  return AVERROR(ENOSYS);
2473  }
2474 
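 /* With AVDISCARD_ALL only the structural markers (SOI, SOF*, SOS, EOI) are
  * still parsed so the stream parameters stay in sync; everything else is skipped. */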
2475  if (avctx->skip_frame == AVDISCARD_ALL) {
2476  switch(start_code) {
2477  case SOF0:
2478  case SOF1:
2479  case SOF2:
2480  case SOF3:
2481  case SOF48:
2482  case SOI:
2483  case SOS:
2484  case EOI:
2485  break;
2486  default:
2487  goto skip;
2488  }
2489  }
2490 
2491  switch (start_code) {
2492  case SOI:
2493  s->restart_interval = 0;
2494  s->restart_count = 0;
2495  s->raw_image_buffer = buf_ptr;
2496  s->raw_image_buffer_size = buf_end - buf_ptr;
2497  /* nothing to do on SOI */
2498  break;
2499  case DHT:
2500  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2501  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2502  goto fail;
2503  }
2504  break;
2505  case SOF0:
2506  case SOF1:
2507  if (start_code == SOF0)
2508  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2509  else
2510  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT;
2511  s->lossless = 0;
2512  s->ls = 0;
2513  s->progressive = 0;
2514  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2515  goto fail;
2516  break;
2517  case SOF2:
2518  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2519  s->lossless = 0;
2520  s->ls = 0;
2521  s->progressive = 1;
2522  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2523  goto fail;
2524  break;
2525  case SOF3:
2526  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2527  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2528  s->lossless = 1;
2529  s->ls = 0;
2530  s->progressive = 0;
2531  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2532  goto fail;
2533  break;
2534  case SOF48:
2535  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2536  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2537  s->lossless = 1;
2538  s->ls = 1;
2539  s->progressive = 0;
2540  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2541  goto fail;
2542  break;
2543  case LSE:
2544  if (!CONFIG_JPEGLS_DECODER ||
2545  (ret = ff_jpegls_decode_lse(s)) < 0)
2546  goto fail;
2547  break;
2548  case EOI:
2549 eoi_parser:
2550  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2551  s->progressive && s->cur_scan && s->got_picture)
2552  mjpeg_idct_scan_progressive_ac(s);
2553  s->cur_scan = 0;
2554  if (!s->got_picture) {
2555  av_log(avctx, AV_LOG_WARNING,
2556  "Found EOI before any SOF, ignoring\n");
2557  break;
2558  }
2559  if (s->interlaced) {
2560  s->bottom_field ^= 1;
2561  /* if not bottom field, do not output image yet */
2562  if (s->bottom_field == !s->interlace_polarity)
2563  break;
2564  }
2565  if (avctx->skip_frame == AVDISCARD_ALL) {
2566  s->got_picture = 0;
2567  ret = AVERROR(EAGAIN);
2568  goto the_end_no_picture;
2569  }
2570  if (s->avctx->hwaccel) {
2571  ret = s->avctx->hwaccel->end_frame(s->avctx);
2572  if (ret < 0)
2573  return ret;
2574 
2575  av_freep(&s->hwaccel_picture_private);
2576  }
2577  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2578  return ret;
2579  s->got_picture = 0;
2580 
2581  frame->pkt_dts = s->pkt->dts;
2582 
2583  if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
2584  int qp = FFMAX3(s->qscale[0],
2585  s->qscale[1],
2586  s->qscale[2]);
2587 
2588  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2589  }
2590 
2591  goto the_end;
2592  case SOS:
2593  s->raw_scan_buffer = buf_ptr;
2594  s->raw_scan_buffer_size = buf_end - buf_ptr;
2595 
2596  s->cur_scan++;
2597  if (avctx->skip_frame == AVDISCARD_ALL) {
2598  skip_bits(&s->gb, get_bits_left(&s->gb));
2599  break;
2600  }
2601 
2602  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2603  (avctx->err_recognition & AV_EF_EXPLODE))
2604  goto fail;
2605  break;
2606  case DRI:
2607  if ((ret = mjpeg_decode_dri(s)) < 0)
2608  return ret;
2609  break;
2610  case SOF5:
2611  case SOF6:
2612  case SOF7:
2613  case SOF9:
2614  case SOF10:
2615  case SOF11:
2616  case SOF13:
2617  case SOF14:
2618  case SOF15:
2619  case JPG:
2620  av_log(avctx, AV_LOG_ERROR,
2621  "mjpeg: unsupported coding type (%x)\n", start_code);
2622  break;
2623  }
2624 
2625 skip:
2626  /* eof process start code */
2627  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2628  av_log(avctx, AV_LOG_DEBUG,
2629  "marker parser used %d bytes (%d bits)\n",
2630  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2631  }
2632  if (s->got_picture && s->cur_scan) {
2633  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2634  goto eoi_parser;
2635  }
2636  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2637  return AVERROR_INVALIDDATA;
2638 fail:
2639  s->got_picture = 0;
2640  return ret;
2641 the_end:
2642 
2643  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2644 
2645  if (AV_RB32(s->upscale_h)) {
2646  int p;
2647  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2648  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2649  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2651  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2654  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2655  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2656  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2657  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2658  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2659  );
2660  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2661  if (ret)
2662  return ret;
2663 
2664  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2665  for (p = 0; p<s->nb_components; p++) {
2666  uint8_t *line = s->picture_ptr->data[p];
2667  int w = s->width;
2668  int h = s->height;
2669  if (!s->upscale_h[p])
2670  continue;
2671  if (p==1 || p==2) {
2672  w = AV_CEIL_RSHIFT(w, hshift);
2673  h = AV_CEIL_RSHIFT(h, vshift);
2674  }
2675  if (s->upscale_v[p] == 1)
2676  h = (h+1)>>1;
2677  av_assert0(w > 0);
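 /* Upsample horizontally in place, walking from right to left so lower-index
  * source samples are still intact when read: upscale_h == 1 doubles the width
  * with linear interpolation, upscale_h == 2 triples it by averaging the three
  * nearest source samples. */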
2678  for (i = 0; i < h; i++) {
2679  if (s->upscale_h[p] == 1) {
2680  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2681  else line[w - 1] = line[(w - 1) / 2];
2682  for (index = w - 2; index > 0; index--) {
2683  if (is16bit)
2684  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2685  else
2686  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2687  }
2688  } else if (s->upscale_h[p] == 2) {
2689  if (is16bit) {
2690  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2691  if (w > 1)
2692  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2693  } else {
2694  line[w - 1] = line[(w - 1) / 3];
2695  if (w > 1)
2696  line[w - 2] = line[w - 1];
2697  }
2698  for (index = w - 3; index > 0; index--) {
2699  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2700  }
2701  }
2702  line += s->linesize[p];
2703  }
2704  }
2705  }
2706  if (AV_RB32(s->upscale_v)) {
2707  int p;
2708  av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
2709  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2710  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2711  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2712  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2713  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2714  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2715  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2716  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2717  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2718  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2719  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2720  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2721  );
2722  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2723  if (ret)
2724  return ret;
2725 
2726  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2727  for (p = 0; p < s->nb_components; p++) {
2728  uint8_t *dst;
2729  int w = s->width;
2730  int h = s->height;
2731  if (!s->upscale_v[p])
2732  continue;
2733  if (p==1 || p==2) {
2734  w = AV_CEIL_RSHIFT(w, hshift);
2735  h = AV_CEIL_RSHIFT(h, vshift);
2736  }
2737  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
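 /* Upsample vertically in place from the bottom row upwards; each output row is
  * the average of the two nearest source rows, or a plain copy when both map to
  * the same source row. */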
2738  for (i = h - 1; i; i--) {
2739  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2740  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2741  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2742  memcpy(dst, src1, w);
2743  } else {
2744  for (index = 0; index < w; index++)
2745  dst[index] = (src1[index] + src2[index]) >> 1;
2746  }
2747  dst -= s->linesize[p];
2748  }
2749  }
2750  }
2751  if (s->flipped && !s->rgb) {
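 /* These streams (flagged by the Intel/Metasoft COM handling above) store the
  * image bottom-up: flip every plane vertically by swapping rows. */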
2752  int j;
2753  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2754  if (ret)
2755  return ret;
2756 
2757  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2758  for (index=0; index<s->nb_components; index++) {
2759  uint8_t *dst = s->picture_ptr->data[index];
2760  int w = s->picture_ptr->width;
2761  int h = s->picture_ptr->height;
2762  if(index && index<3){
2763  w = AV_CEIL_RSHIFT(w, hshift);
2764  h = AV_CEIL_RSHIFT(h, vshift);
2765  }
2766  if(dst){
2767  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2768  for (i=0; i<h/2; i++) {
2769  for (j=0; j<w; j++)
2770  FFSWAP(int, dst[j], dst2[j]);
2771  dst += s->picture_ptr->linesize[index];
2772  dst2 -= s->picture_ptr->linesize[index];
2773  }
2774  }
2775  }
2776  }
2777  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2778  int w = s->picture_ptr->width;
2779  int h = s->picture_ptr->height;
2780  av_assert0(s->nb_components == 4);
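 /* Four-component Adobe data with transform 0 (CMYK-style): treat the fourth
  * plane as K, scale each colour channel by K/255 (the *257 >> 16 approximates
  * a division by 255) and reorder the planes into G, B, R with opaque alpha. */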
2781  for (i=0; i<h; i++) {
2782  int j;
2783  uint8_t *dst[4];
2784  for (index=0; index<4; index++) {
2785  dst[index] = s->picture_ptr->data[index]
2786  + s->picture_ptr->linesize[index]*i;
2787  }
2788  for (j=0; j<w; j++) {
2789  int k = dst[3][j];
2790  int r = dst[0][j] * k;
2791  int g = dst[1][j] * k;
2792  int b = dst[2][j] * k;
2793  dst[0][j] = g*257 >> 16;
2794  dst[1][j] = b*257 >> 16;
2795  dst[2][j] = r*257 >> 16;
2796  dst[3][j] = 255;
2797  }
2798  }
2799  }
2800  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2801  int w = s->picture_ptr->width;
2802  int h = s->picture_ptr->height;
2803  av_assert0(s->nb_components == 4);
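 /* Adobe transform 2 with four components (YCCK-style): the luma channel is
  * computed as (255 - Y) * K / 255 and each chroma channel as
  * 128 + (128 - C) * K / 255, with alpha forced to opaque. */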
2804  for (i=0; i<h; i++) {
2805  int j;
2806  uint8_t *dst[4];
2807  for (index=0; index<4; index++) {
2808  dst[index] = s->picture_ptr->data[index]
2809  + s->picture_ptr->linesize[index]*i;
2810  }
2811  for (j=0; j<w; j++) {
2812  int k = dst[3][j];
2813  int r = (255 - dst[0][j]) * k;
2814  int g = (128 - dst[1][j]) * k;
2815  int b = (128 - dst[2][j]) * k;
2816  dst[0][j] = r*257 >> 16;
2817  dst[1][j] = (g*257 >> 16) + 128;
2818  dst[2][j] = (b*257 >> 16) + 128;
2819  dst[3][j] = 255;
2820  }
2821  }
2822  }
2823 
2824  if (s->stereo3d) {
2825  AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
2826  if (stereo) {
2827  stereo->type = s->stereo3d->type;
2828  stereo->flags = s->stereo3d->flags;
2829  }
2830  av_freep(&s->stereo3d);
2831  }
2832 
2833  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2834  AVFrameSideData *sd;
2835  size_t offset = 0;
2836  int total_size = 0;
2837  int i;
2838 
2839  /* Sum size of all parts. */
2840  for (i = 0; i < s->iccnum; i++)
2841  total_size += s->iccentries[i].length;
2842 
2843  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, total_size);
2844  if (!sd) {
2845  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2846  return AVERROR(ENOMEM);
2847  }
2848 
2849  /* Reassemble the parts, which are now in-order. */
2850  for (i = 0; i < s->iccnum; i++) {
2851  memcpy(sd->data + offset, s->iccentries[i].data, s->iccentries[i].length);
2852  offset += s->iccentries[i].length;
2853  }
2854  }
2855 
2856  av_dict_copy(&frame->metadata, s->exif_metadata, 0);
2857  av_dict_free(&s->exif_metadata);
2858 
2859  if (avctx->codec_id == AV_CODEC_ID_SMVJPEG) {
2860  ret = smv_process_frame(avctx, frame);
2861  if (ret < 0) {
2862  av_frame_unref(frame);
2863  return ret;
2864  }
2865  }
2866  if ((avctx->codec_tag == MKTAG('A', 'V', 'R', 'n') ||
2867  avctx->codec_tag == MKTAG('A', 'V', 'D', 'J')) &&
2868  avctx->coded_height > s->orig_height) {
2869  frame->height = AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres);
2870  frame->crop_top = frame->height - avctx->height;
2871  }
2872 
2873  ret = 0;
2874 
2875 the_end_no_picture:
2876  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2877  buf_end - buf_ptr);
2878 
2879  return ret;
2880 }
2881 
2882 /* mxpeg may call the following function (with a blank MJpegDecodeContext)
2883  * even without having called ff_mjpeg_decode_init(). */
2884 av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
2885 {
2886  MJpegDecodeContext *s = avctx->priv_data;
2887  int i, j;
2888 
2889  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2890  av_log(avctx, AV_LOG_INFO, "Single field\n");
2891  }
2892 
2893  if (s->picture) {
2894  av_frame_free(&s->picture);
2895  s->picture_ptr = NULL;
2896  } else if (s->picture_ptr)
2897  av_frame_unref(s->picture_ptr);
2898 
2899  av_packet_free(&s->pkt);
2900 
2901  av_frame_free(&s->smv_frame);
2902 
2903  av_freep(&s->buffer);
2904  av_freep(&s->stereo3d);
2905  av_freep(&s->ljpeg_buffer);
2906  s->ljpeg_buffer_size = 0;
2907 
2908  for (i = 0; i < 3; i++) {
2909  for (j = 0; j < 4; j++)
2910  ff_free_vlc(&s->vlcs[i][j]);
2911  }
2912  for (i = 0; i < MAX_COMPONENTS; i++) {
2913  av_freep(&s->blocks[i]);
2914  av_freep(&s->last_nnz[i]);
2915  }
2916  av_dict_free(&s->exif_metadata);
2917 
2918  reset_icc_profile(s);
2919 
2920  av_freep(&s->hwaccel_picture_private);
2921 
2922  return 0;
2923 }
2924 
2925 static void decode_flush(AVCodecContext *avctx)
2926 {
2927  MJpegDecodeContext *s = avctx->priv_data;
2928  s->got_picture = 0;
2929 
2930  s->smv_next_frame = 0;
2931  av_frame_unref(s->smv_frame);
2932 }
2933 
2934 #if CONFIG_MJPEG_DECODER
2935 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2936 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2937 static const AVOption options[] = {
2938  { "extern_huff", "Use external huffman table.",
2939  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2940  { NULL },
2941 };
2942 
2943 static const AVClass mjpegdec_class = {
2944  .class_name = "MJPEG decoder",
2945  .item_name = av_default_item_name,
2946  .option = options,
2947  .version = LIBAVUTIL_VERSION_INT,
2948 };
2949 
2950 AVCodec ff_mjpeg_decoder = {
2951  .name = "mjpeg",
2952  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2953  .type = AVMEDIA_TYPE_VIDEO,
2954  .id = AV_CODEC_ID_MJPEG,
2955  .priv_data_size = sizeof(MJpegDecodeContext),
2956  .init = ff_mjpeg_decode_init,
2957  .close = ff_mjpeg_decode_end,
2958  .receive_frame = ff_mjpeg_receive_frame,
2959  .flush = decode_flush,
2960  .capabilities = AV_CODEC_CAP_DR1,
2961  .max_lowres = 3,
2962  .priv_class = &mjpegdec_class,
2966  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2967 #if CONFIG_MJPEG_NVDEC_HWACCEL
2968  HWACCEL_NVDEC(mjpeg),
2969 #endif
2970 #if CONFIG_MJPEG_VAAPI_HWACCEL
2971  HWACCEL_VAAPI(mjpeg),
2972 #endif
2973  NULL
2974  },
2975 };
2976 #endif
2977 #if CONFIG_THP_DECODER
2978 AVCodec ff_thp_decoder = {
2979  .name = "thp",
2980  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2981  .type = AVMEDIA_TYPE_VIDEO,
2982  .id = AV_CODEC_ID_THP,
2983  .priv_data_size = sizeof(MJpegDecodeContext),
2984  .init = ff_mjpeg_decode_init,
2985  .close = ff_mjpeg_decode_end,
2986  .receive_frame = ff_mjpeg_receive_frame,
2987  .flush = decode_flush,
2988  .capabilities = AV_CODEC_CAP_DR1,
2989  .max_lowres = 3,
2992 };
2993 #endif
2994 
2995 #if CONFIG_SMVJPEG_DECODER
2996 AVCodec ff_smvjpeg_decoder = {
2997  .name = "smvjpeg",
2998  .long_name = NULL_IF_CONFIG_SMALL("SMV JPEG"),
2999  .type = AVMEDIA_TYPE_VIDEO,
3000  .id = AV_CODEC_ID_SMVJPEG,
3001  .priv_data_size = sizeof(MJpegDecodeContext),
3002  .init = ff_mjpeg_decode_init,
3003  .close = ff_mjpeg_decode_end,
3004  .receive_frame = ff_mjpeg_receive_frame,
3005  .flush = decode_flush,
3006  .capabilities = AV_CODEC_CAP_DR1,
3009 };
3010 #endif
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:103
hwconfig.h
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:634
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1680
AVCodec
AVCodec.
Definition: codec.h:197
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
ff_decode_get_packet
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:222
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:41
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
FF_CODEC_CAP_SETS_PKT_DTS
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:56
mjpeg.h
level
uint8_t level
Definition: svq3.c:206
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1164
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1326
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:108
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1385
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2925
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:947
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:478
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1645
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
Definition: frame.c:726
avpriv_mjpeg_bits_ac_luminance
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
ff_smvjpeg_decoder
AVCodec ff_smvjpeg_decoder
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:57
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:275
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:108
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:190
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
avpriv_mjpeg_val_ac_luminance
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
AVFrame::width
int width
Definition: frame.h:376
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:441
w
uint8_t w
Definition: llviddspenc.c:39
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:586
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1962
smv_process_frame
static int smv_process_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2333
internal.h
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1193
AVOption
AVOption.
Definition: opt.h:248
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:781
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:142
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
mjpeg_get_packet
static int mjpeg_get_packet(AVCodecContext *avctx)
Definition: mjpegdec.c:2367
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:149
avpriv_mjpeg_bits_dc_luminance
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1624
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:518
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
rgb
Definition: rpzaenc.c:58
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:236
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1231
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1401
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:545
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:117
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2006
fail
#define fail()
Definition: checkasm.h:133
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:443
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1958
GetBitContext
Definition: get_bits.h:61
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2132
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:54
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:616
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:724
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:383
ff_sp5x_process_packet
int ff_sp5x_process_packet(AVCodecContext *avctx, AVPacket *avpkt)
Definition: sp5xdec.c:33
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:61
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
av_bswap32
#define av_bswap32
Definition: bswap.h:33
avpriv_mjpeg_bits_dc_chrominance
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:258
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:169
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:115
aligned
static int aligned(int val)
Definition: dashdec.c:168
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:848
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:411
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:2184
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1960
mask
static const uint16_t mask[17]
Definition: lzw.c:38
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1030
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:638
width
#define width
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:104
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:97
avpriv_mjpeg_val_dc
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:412
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1961
g
const char * g
Definition: vf_curves.c:117
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:350
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:169
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:410
RST0
@ RST0
Definition: mjpeg.h:61
decode.h
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2317
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2884
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:431
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:44
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:546
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
ff_mjpeg_receive_frame
int ff_mjpeg_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: mjpegdec.c:2391
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:418
ff_thp_decoder
AVCodec ff_thp_decoder
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:389
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_CODEC_ID_SP5X
@ AV_CODEC_ID_SP5X
Definition: codec_id.h:59
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
av_clip_int16
#define av_clip_int16
Definition: common.h:137
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:390
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:592
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1592
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:205
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:195
SOF13
@ SOF13
Definition: mjpeg.h:52
receive_frame
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
Definition: crystalhd.c:560
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
src
#define src
Definition: vp8dsp.c:255
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MJpegDecodeContext
Definition: mjpegdec.h:52
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1416
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:965
lowres
static int lowres
Definition: ffplay.c:336
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1537
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1656
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:67
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
avpriv_mjpeg_val_ac_chrominance
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1754
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:506
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1900
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1065
dc
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:325
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
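A minimal sketch of how a caller can share frame data through reference counting instead of copying it; the helper name share_frame and its arguments are illustrative, only av_frame_ref()/av_frame_unref() come from libavutil:
#include <libavutil/frame.h>
/* Create a second reference to src's buffers instead of copying them.
 * dst must not hold any references before av_frame_ref() is called. */
static int share_frame(AVFrame *dst, const AVFrame *src)
{
    av_frame_unref(dst);            /* drop whatever dst still references */
    return av_frame_ref(dst, src);  /* 0 on success, negative AVERROR on failure */
}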
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:866
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:119
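A short usage sketch: the buffer behind av_err2str() is a temporary compound literal, which is why the documentation says to pass it straight to a function such as av_log(); the helper log_and_return and the "decode failed" message are hypothetical:
#include <libavcodec/avcodec.h>
#include <libavutil/error.h>
/* Hypothetical helper: log a failing return code and propagate it unchanged. */
static int log_and_return(AVCodecContext *avctx, int ret)
{
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "decode failed: %s\n", av_err2str(ret));
    return ret;
}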
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1627
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
size
int size
Definition: twinvq_data.h:10344
AV_CODEC_ID_SMVJPEG
@ AV_CODEC_ID_SMVJPEG
Definition: codec_id.h:263
AV_RB32
Bytestream macro: read a 32-bit value in big-endian byte order.
Definition: bytestream.h:96
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3);
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:222
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:391
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
VD
#define VD
Definition: av1dec.c:1208
offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
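A small allocation/teardown sketch for the packet API; packet_roundtrip is a made-up wrapper, and the packet.h header path assumes a reasonably recent FFmpeg where AVPacket lives in its own header (older releases declare it in avcodec.h):
#include <libavcodec/packet.h>
#include <libavutil/error.h>
static int packet_roundtrip(void)
{
    AVPacket *pkt = av_packet_alloc();  /* fields set to defaults, no payload yet */
    if (!pkt)
        return AVERROR(ENOMEM);
    /* ... fill pkt->data / pkt->size or reference an existing buffer ... */
    av_packet_free(&pkt);               /* unreferences the payload, frees the struct, sets pkt to NULL */
    return 0;
}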
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
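A brief dictionary lifecycle sketch, assuming only the public libavutil dict API (the key/value strings and the dict_demo name are placeholders):
#include <libavutil/dict.h>
/* Build a small metadata dictionary, duplicate it, then release both copies. */
static void dict_demo(void)
{
    AVDictionary *src = NULL, *dst = NULL;
    av_dict_set(&src, "comment", "mjpeg sample", 0);
    av_dict_copy(&dst, src, 0);   /* copy every entry into dst */
    /* ... attach dst to a frame or stream as needed ... */
    av_dict_free(&src);           /* frees keys, values and the struct, resets the pointer to NULL */
    av_dict_free(&dst);
}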
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
predictor
static void predictor(uint8_t *src, ptrdiff_t size)
Definition: exrenc.c:163
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2172
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
DQT
@ DQT
Definition: mjpeg.h:73
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
AVCodec::id
enum AVCodecID id
Definition: codec.h:211
layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
src1
#define src1
Definition: h264pred.c:140
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
interlaced
uint8_t interlaced
Definition: mxfenc.c:2208
i
int i
Definition: input.c:407
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:799
code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
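A hedged illustration of the bitreader pattern this decoder relies on: get_bits.h is a private libavcodec header, so the sketch is only meaningful inside the FFmpeg tree, and peek_marker plus its buf/size_bytes parameters are invented for the example:
#include "get_bits.h"   /* private libavcodec header */
/* Peek at the next 16 bits without consuming them, then decide whether to skip them. */
static int peek_marker(const uint8_t *buf, int size_bytes)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, size_bytes);
    if (ret < 0)
        return ret;
    if (show_bits(&gb, 16) == 0xFFD8)   /* looks like an SOI marker; bits not yet consumed */
        skip_bits(&gb, 16);             /* now actually consume them */
    return get_bits_count(&gb);         /* number of bits read so far */
}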
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: internal.h:49
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1959
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1805
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc(), but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE bytes at the end which will always be zero.
Definition: utils.c:50
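A minimal sketch of the reusable-scratch-buffer pattern, assuming the function is declared in libavcodec/avcodec.h as in this FFmpeg version; ensure_buffer and its parameters are hypothetical:
#include <libavcodec/avcodec.h>
#include <libavutil/error.h>
/* Grow a reusable buffer only when needed; the zeroed padding at the end
 * lets a bitreader safely overread past the requested size. */
static int ensure_buffer(uint8_t **buf, unsigned int *buf_size, size_t needed)
{
    av_fast_padded_malloc(buf, buf_size, needed);  /* on failure *buf is freed and set to NULL */
    return *buf ? 0 : AVERROR(ENOMEM);
}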
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1631
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:237
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:204
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1178
len
int len
Definition: vorbis_enc_data.h:452
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::height
int height
Definition: avcodec.h:709
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:569
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:939
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
Definition: log.h:72
frame
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
pos
unsigned int pos
Definition: spdifenc.c:412
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1628
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
left
Definition: snow.txt:386
AV_RL32
Bytestream macro: read a 32-bit value in little-endian byte order.
Definition: bytestream.h:92
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2197
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
OFFSET
Offset of the option's field within the private context structure; see the OFFSET() macro described in writing_filters.txt.
AVCodecContext
main external API structure.
Definition: avcodec.h:536
AVFrame::height
int height
Definition: frame.h:376
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
buffer
The frame and frame-reference mechanism is intended to avoid, as much as possible, expensive copies of frame data while still allowing filters to produce correct results. The data is stored in buffers represented by AVFrame structures; several references can point to the same frame buffer.
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:608
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
values
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the first IFD in *ifd_offset.
Definition: tiff_common.c:261
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:428
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1623
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:297
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres is enabled.
Definition: avcodec.h:724
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:84
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1817
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1227
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
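A hedged sketch of attaching stereo 3D side data to a decoded frame, similar in spirit to what this decoder does when it parses a JPS-style APP marker; tag_stereo and the particular packing/flag values are illustrative:
#include <libavutil/frame.h>
#include <libavutil/stereo3d.h>
#include <libavutil/error.h>
/* Tag a frame as top/bottom packed stereo with inverted view order. */
static int tag_stereo(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);  /* allocates and attaches side data */
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;
    stereo->flags = AV_STEREO3D_FLAG_INVERT;  /* right/bottom view represents the left eye */
    return 0;
}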
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:220
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:110
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:561
ff_mjpeg_build_vlc
int ff_mjpeg_build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int is_ac, void *logctx)
Definition: mjpegdec_common.c:41
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:563
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:45
rgb
static const SheerTable rgb[2]
Definition: sheervideodata.h:32
block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
avpriv_mjpeg_bits_ac_chrominance
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
ff_mjpeg_decoder
AVCodec ff_mjpeg_decoder
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
Definition: imgutils.c:317
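A small sketch of validating dimensions before any image buffer is allocated; check_dims and its logging message are invented, only av_image_check_size() and av_log() are real API:
#include <libavutil/imgutils.h>
#include <libavutil/log.h>
/* Reject dimensions that could overflow downstream size arithmetic. */
static int check_dims(void *log_ctx, unsigned int w, unsigned int h)
{
    int ret = av_image_check_size(w, h, 0, log_ctx);  /* 0 = no log level offset */
    if (ret < 0)
        av_log(log_ctx, AV_LOG_ERROR, "invalid dimensions %ux%u\n", w, h);
    return ret;
}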
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
AV_RB24
Bytestream macro: read a 24-bit value in big-endian byte order.
Definition: bytestream.h:97
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
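A hedged restatement of the lossless-JPEG prediction step that the PREDICT macro performs; the numbering follows the seven standard JPEG predictors, and this standalone function is an illustration rather than the macro itself:
#include <stdint.h>
/* Standard lossless JPEG predictors 1..7 (0 means "no prediction"). */
static int ljpeg_predict(int topleft, int top, int left, int predictor)
{
    switch (predictor) {
    case 1:  return left;
    case 2:  return top;
    case 3:  return topleft;
    case 4:  return left + top - topleft;
    case 5:  return left + ((top - topleft) >> 1);
    case 6:  return top + ((left - topleft) >> 1);
    case 7:  return (left + top) >> 1;
    default: return 0;
    }
}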
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:82